diff --git a/sphinx/_build/doctrees/PAMI.AssociationRules.basic.doctree b/sphinx/_build/doctrees/PAMI.AssociationRules.basic.doctree
new file mode 100644
index 000000000..131470267
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.AssociationRules.basic.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.AssociationRules.doctree b/sphinx/_build/doctrees/PAMI.AssociationRules.doctree
new file mode 100644
index 000000000..3258bf7d0
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.AssociationRules.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.correlatedPattern.basic.doctree b/sphinx/_build/doctrees/PAMI.correlatedPattern.basic.doctree
new file mode 100644
index 000000000..4c08ad3fc
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.correlatedPattern.basic.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.correlatedPattern.doctree b/sphinx/_build/doctrees/PAMI.correlatedPattern.doctree
new file mode 100644
index 000000000..0087c4e80
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.correlatedPattern.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.coveragePattern.basic.doctree b/sphinx/_build/doctrees/PAMI.coveragePattern.basic.doctree
new file mode 100644
index 000000000..88a9dec49
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.coveragePattern.basic.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.coveragePattern.doctree b/sphinx/_build/doctrees/PAMI.coveragePattern.doctree
new file mode 100644
index 000000000..c43216c98
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.coveragePattern.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.doctree b/sphinx/_build/doctrees/PAMI.doctree
new file mode 100644
index 000000000..850588389
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.extras.DF2DB.doctree b/sphinx/_build/doctrees/PAMI.extras.DF2DB.doctree
new file mode 100644
index 000000000..a71c1ed2c
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.extras.DF2DB.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.extras.calculateMISValues.doctree b/sphinx/_build/doctrees/PAMI.extras.calculateMISValues.doctree
new file mode 100644
index 000000000..64f057266
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.extras.calculateMISValues.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.extras.dbStats.doctree b/sphinx/_build/doctrees/PAMI.extras.dbStats.doctree
new file mode 100644
index 000000000..126847940
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.extras.dbStats.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.extras.doctree b/sphinx/_build/doctrees/PAMI.extras.doctree
new file mode 100644
index 000000000..307d0cc2c
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.extras.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.extras.fuzzyTransformation.doctree b/sphinx/_build/doctrees/PAMI.extras.fuzzyTransformation.doctree
new file mode 100644
index 000000000..cbf8c9c8b
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.extras.fuzzyTransformation.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.extras.generateDatabase.doctree b/sphinx/_build/doctrees/PAMI.extras.generateDatabase.doctree
new file mode 100644
index 000000000..8120f1db2
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.extras.generateDatabase.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.extras.graph.doctree b/sphinx/_build/doctrees/PAMI.extras.graph.doctree
new file mode 100644
index 000000000..7e746988d
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.extras.graph.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.extras.image2Database.doctree b/sphinx/_build/doctrees/PAMI.extras.image2Database.doctree
new file mode 100644
index 000000000..58bf8f55e
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.extras.image2Database.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.extras.imageProcessing.doctree b/sphinx/_build/doctrees/PAMI.extras.imageProcessing.doctree
new file mode 100644
index 000000000..ec5d14937
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.extras.imageProcessing.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.extras.messaging.doctree b/sphinx/_build/doctrees/PAMI.extras.messaging.doctree
new file mode 100644
index 000000000..996cce6af
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.extras.messaging.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.extras.neighbours.doctree b/sphinx/_build/doctrees/PAMI.extras.neighbours.doctree
new file mode 100644
index 000000000..036031336
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.extras.neighbours.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.extras.sampleDatasets.doctree b/sphinx/_build/doctrees/PAMI.extras.sampleDatasets.doctree
new file mode 100644
index 000000000..260695277
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.extras.sampleDatasets.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.extras.stats.doctree b/sphinx/_build/doctrees/PAMI.extras.stats.doctree
new file mode 100644
index 000000000..034d6228b
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.extras.stats.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.extras.syntheticDataGenerator.doctree b/sphinx/_build/doctrees/PAMI.extras.syntheticDataGenerator.doctree
new file mode 100644
index 000000000..ff1f042fd
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.extras.syntheticDataGenerator.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.extras.visualize.doctree b/sphinx/_build/doctrees/PAMI.extras.visualize.doctree
new file mode 100644
index 000000000..1a3fc20cb
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.extras.visualize.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.faultTolerantFrequentPattern.basic.doctree b/sphinx/_build/doctrees/PAMI.faultTolerantFrequentPattern.basic.doctree
new file mode 100644
index 000000000..57522a4c2
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.faultTolerantFrequentPattern.basic.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.faultTolerantFrequentPattern.doctree b/sphinx/_build/doctrees/PAMI.faultTolerantFrequentPattern.doctree
new file mode 100644
index 000000000..9c55b7e14
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.faultTolerantFrequentPattern.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.frequentPattern.basic.doctree b/sphinx/_build/doctrees/PAMI.frequentPattern.basic.doctree
new file mode 100644
index 000000000..edfaadaa6
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.frequentPattern.basic.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.frequentPattern.closed.doctree b/sphinx/_build/doctrees/PAMI.frequentPattern.closed.doctree
new file mode 100644
index 000000000..4600083ec
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.frequentPattern.closed.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.frequentPattern.cuda.doctree b/sphinx/_build/doctrees/PAMI.frequentPattern.cuda.doctree
new file mode 100644
index 000000000..1765a0750
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.frequentPattern.cuda.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.frequentPattern.doctree b/sphinx/_build/doctrees/PAMI.frequentPattern.doctree
new file mode 100644
index 000000000..b8f12451f
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.frequentPattern.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.frequentPattern.maximal.doctree b/sphinx/_build/doctrees/PAMI.frequentPattern.maximal.doctree
new file mode 100644
index 000000000..e790603f5
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.frequentPattern.maximal.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.frequentPattern.pyspark.doctree b/sphinx/_build/doctrees/PAMI.frequentPattern.pyspark.doctree
new file mode 100644
index 000000000..eb647902b
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.frequentPattern.pyspark.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.frequentPattern.topk.doctree b/sphinx/_build/doctrees/PAMI.frequentPattern.topk.doctree
new file mode 100644
index 000000000..799d5b5e5
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.frequentPattern.topk.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.fuzzyCorrelatedPattern.basic.doctree b/sphinx/_build/doctrees/PAMI.fuzzyCorrelatedPattern.basic.doctree
new file mode 100644
index 000000000..351682eb1
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.fuzzyCorrelatedPattern.basic.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.fuzzyCorrelatedPattern.doctree b/sphinx/_build/doctrees/PAMI.fuzzyCorrelatedPattern.doctree
new file mode 100644
index 000000000..cd2c29563
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.fuzzyCorrelatedPattern.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.fuzzyFrequentPattern.basic.doctree b/sphinx/_build/doctrees/PAMI.fuzzyFrequentPattern.basic.doctree
new file mode 100644
index 000000000..0b80b4ab4
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.fuzzyFrequentPattern.basic.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.fuzzyFrequentPattern.doctree b/sphinx/_build/doctrees/PAMI.fuzzyFrequentPattern.doctree
new file mode 100644
index 000000000..11a8533ef
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.fuzzyFrequentPattern.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.fuzzyGeoreferencedFrequentPattern.basic.doctree b/sphinx/_build/doctrees/PAMI.fuzzyGeoreferencedFrequentPattern.basic.doctree
new file mode 100644
index 000000000..795737ebb
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.fuzzyGeoreferencedFrequentPattern.basic.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.fuzzyGeoreferencedFrequentPattern.doctree b/sphinx/_build/doctrees/PAMI.fuzzyGeoreferencedFrequentPattern.doctree
new file mode 100644
index 000000000..2fbcebdd4
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.fuzzyGeoreferencedFrequentPattern.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.doctree b/sphinx/_build/doctrees/PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.doctree
new file mode 100644
index 000000000..24ca45dcd
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.doctree b/sphinx/_build/doctrees/PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.doctree
new file mode 100644
index 000000000..a00418eca
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.fuzzyPartialPeriodicPatterns.basic.doctree b/sphinx/_build/doctrees/PAMI.fuzzyPartialPeriodicPatterns.basic.doctree
new file mode 100644
index 000000000..30a845677
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.fuzzyPartialPeriodicPatterns.basic.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.fuzzyPartialPeriodicPatterns.doctree b/sphinx/_build/doctrees/PAMI.fuzzyPartialPeriodicPatterns.doctree
new file mode 100644
index 000000000..7b4fc08dd
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.fuzzyPartialPeriodicPatterns.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.fuzzyPeriodicFrequentPattern.basic.doctree b/sphinx/_build/doctrees/PAMI.fuzzyPeriodicFrequentPattern.basic.doctree
new file mode 100644
index 000000000..961bab6aa
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.fuzzyPeriodicFrequentPattern.basic.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.fuzzyPeriodicFrequentPattern.doctree b/sphinx/_build/doctrees/PAMI.fuzzyPeriodicFrequentPattern.doctree
new file mode 100644
index 000000000..969967b0a
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.fuzzyPeriodicFrequentPattern.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.geoReferencedPeriodicFrequentPattern.basic.doctree b/sphinx/_build/doctrees/PAMI.geoReferencedPeriodicFrequentPattern.basic.doctree
new file mode 100644
index 000000000..d60da2bf2
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.geoReferencedPeriodicFrequentPattern.basic.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.geoReferencedPeriodicFrequentPattern.doctree b/sphinx/_build/doctrees/PAMI.geoReferencedPeriodicFrequentPattern.doctree
new file mode 100644
index 000000000..1bb93b4e0
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.geoReferencedPeriodicFrequentPattern.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.georeferencedFrequentPattern.basic.doctree b/sphinx/_build/doctrees/PAMI.georeferencedFrequentPattern.basic.doctree
new file mode 100644
index 000000000..1e28528c4
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.georeferencedFrequentPattern.basic.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.georeferencedFrequentPattern.doctree b/sphinx/_build/doctrees/PAMI.georeferencedFrequentPattern.doctree
new file mode 100644
index 000000000..282393934
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.georeferencedFrequentPattern.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.georeferencedFrequentSequencePattern.doctree b/sphinx/_build/doctrees/PAMI.georeferencedFrequentSequencePattern.doctree
new file mode 100644
index 000000000..e4b78c883
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.georeferencedFrequentSequencePattern.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.georeferencedPartialPeriodicPattern.basic.doctree b/sphinx/_build/doctrees/PAMI.georeferencedPartialPeriodicPattern.basic.doctree
new file mode 100644
index 000000000..13387cb95
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.georeferencedPartialPeriodicPattern.basic.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.georeferencedPartialPeriodicPattern.doctree b/sphinx/_build/doctrees/PAMI.georeferencedPartialPeriodicPattern.doctree
new file mode 100644
index 000000000..9521adff9
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.georeferencedPartialPeriodicPattern.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.highUtilityFrequentPattern.basic.doctree b/sphinx/_build/doctrees/PAMI.highUtilityFrequentPattern.basic.doctree
new file mode 100644
index 000000000..831ac75ce
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.highUtilityFrequentPattern.basic.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.highUtilityFrequentPattern.doctree b/sphinx/_build/doctrees/PAMI.highUtilityFrequentPattern.doctree
new file mode 100644
index 000000000..0ff1bec4c
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.highUtilityFrequentPattern.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.highUtilityGeoreferencedFrequentPattern.basic.doctree b/sphinx/_build/doctrees/PAMI.highUtilityGeoreferencedFrequentPattern.basic.doctree
new file mode 100644
index 000000000..ef474c5b0
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.highUtilityGeoreferencedFrequentPattern.basic.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.highUtilityGeoreferencedFrequentPattern.doctree b/sphinx/_build/doctrees/PAMI.highUtilityGeoreferencedFrequentPattern.doctree
new file mode 100644
index 000000000..702938ed7
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.highUtilityGeoreferencedFrequentPattern.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.highUtilityPattern.basic.doctree b/sphinx/_build/doctrees/PAMI.highUtilityPattern.basic.doctree
new file mode 100644
index 000000000..4f6ebdbee
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.highUtilityPattern.basic.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.highUtilityPattern.doctree b/sphinx/_build/doctrees/PAMI.highUtilityPattern.doctree
new file mode 100644
index 000000000..8bdbb7ed9
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.highUtilityPattern.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.highUtilityPattern.parallel.doctree b/sphinx/_build/doctrees/PAMI.highUtilityPattern.parallel.doctree
new file mode 100644
index 000000000..95172cd97
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.highUtilityPattern.parallel.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.highUtilityPatternsInStreams.doctree b/sphinx/_build/doctrees/PAMI.highUtilityPatternsInStreams.doctree
new file mode 100644
index 000000000..1c86cd139
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.highUtilityPatternsInStreams.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.highUtilitySpatialPattern.basic.doctree b/sphinx/_build/doctrees/PAMI.highUtilitySpatialPattern.basic.doctree
new file mode 100644
index 000000000..06c21102d
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.highUtilitySpatialPattern.basic.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.highUtilitySpatialPattern.doctree b/sphinx/_build/doctrees/PAMI.highUtilitySpatialPattern.doctree
new file mode 100644
index 000000000..4fe8c52bf
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.highUtilitySpatialPattern.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.highUtilitySpatialPattern.topk.doctree b/sphinx/_build/doctrees/PAMI.highUtilitySpatialPattern.topk.doctree
new file mode 100644
index 000000000..4f4a5d306
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.highUtilitySpatialPattern.topk.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.localPeriodicPattern.basic.doctree b/sphinx/_build/doctrees/PAMI.localPeriodicPattern.basic.doctree
new file mode 100644
index 000000000..7c376fa8c
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.localPeriodicPattern.basic.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.localPeriodicPattern.doctree b/sphinx/_build/doctrees/PAMI.localPeriodicPattern.doctree
new file mode 100644
index 000000000..e663beca0
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.localPeriodicPattern.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.multipleMinimumSupportBasedFrequentPattern.basic.doctree b/sphinx/_build/doctrees/PAMI.multipleMinimumSupportBasedFrequentPattern.basic.doctree
new file mode 100644
index 000000000..f971df5a5
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.multipleMinimumSupportBasedFrequentPattern.basic.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.multipleMinimumSupportBasedFrequentPattern.doctree b/sphinx/_build/doctrees/PAMI.multipleMinimumSupportBasedFrequentPattern.doctree
new file mode 100644
index 000000000..fe97e6c0f
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.multipleMinimumSupportBasedFrequentPattern.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.partialPeriodicFrequentPattern.basic.doctree b/sphinx/_build/doctrees/PAMI.partialPeriodicFrequentPattern.basic.doctree
new file mode 100644
index 000000000..8675055ad
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.partialPeriodicFrequentPattern.basic.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.partialPeriodicFrequentPattern.doctree b/sphinx/_build/doctrees/PAMI.partialPeriodicFrequentPattern.doctree
new file mode 100644
index 000000000..fd270a8af
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.partialPeriodicFrequentPattern.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.partialPeriodicPattern.basic.doctree b/sphinx/_build/doctrees/PAMI.partialPeriodicPattern.basic.doctree
new file mode 100644
index 000000000..408d318ce
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.partialPeriodicPattern.basic.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.partialPeriodicPattern.closed.doctree b/sphinx/_build/doctrees/PAMI.partialPeriodicPattern.closed.doctree
new file mode 100644
index 000000000..23d8dbc2f
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.partialPeriodicPattern.closed.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.partialPeriodicPattern.doctree b/sphinx/_build/doctrees/PAMI.partialPeriodicPattern.doctree
new file mode 100644
index 000000000..834ed3034
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.partialPeriodicPattern.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.partialPeriodicPattern.maximal.doctree b/sphinx/_build/doctrees/PAMI.partialPeriodicPattern.maximal.doctree
new file mode 100644
index 000000000..077a5ec0a
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.partialPeriodicPattern.maximal.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.partialPeriodicPattern.pyspark.doctree b/sphinx/_build/doctrees/PAMI.partialPeriodicPattern.pyspark.doctree
new file mode 100644
index 000000000..eb9fbd1e4
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.partialPeriodicPattern.pyspark.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.partialPeriodicPattern.topk.doctree b/sphinx/_build/doctrees/PAMI.partialPeriodicPattern.topk.doctree
new file mode 100644
index 000000000..0ae898398
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.partialPeriodicPattern.topk.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.partialPeriodicPatternInMultipleTimeSeries.doctree b/sphinx/_build/doctrees/PAMI.partialPeriodicPatternInMultipleTimeSeries.doctree
new file mode 100644
index 000000000..6f8207373
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.partialPeriodicPatternInMultipleTimeSeries.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.periodicCorrelatedPattern.basic.doctree b/sphinx/_build/doctrees/PAMI.periodicCorrelatedPattern.basic.doctree
new file mode 100644
index 000000000..2e0c07475
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.periodicCorrelatedPattern.basic.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.periodicCorrelatedPattern.doctree b/sphinx/_build/doctrees/PAMI.periodicCorrelatedPattern.doctree
new file mode 100644
index 000000000..23f0c1133
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.periodicCorrelatedPattern.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.periodicFrequentPattern.basic.doctree b/sphinx/_build/doctrees/PAMI.periodicFrequentPattern.basic.doctree
new file mode 100644
index 000000000..fe7fa5f12
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.periodicFrequentPattern.basic.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.periodicFrequentPattern.closed.doctree b/sphinx/_build/doctrees/PAMI.periodicFrequentPattern.closed.doctree
new file mode 100644
index 000000000..9aa39c488
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.periodicFrequentPattern.closed.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.periodicFrequentPattern.cuda.doctree b/sphinx/_build/doctrees/PAMI.periodicFrequentPattern.cuda.doctree
new file mode 100644
index 000000000..ff7ec751e
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.periodicFrequentPattern.cuda.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.periodicFrequentPattern.doctree b/sphinx/_build/doctrees/PAMI.periodicFrequentPattern.doctree
new file mode 100644
index 000000000..2e983eece
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.periodicFrequentPattern.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.periodicFrequentPattern.maximal.doctree b/sphinx/_build/doctrees/PAMI.periodicFrequentPattern.maximal.doctree
new file mode 100644
index 000000000..5287106ce
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.periodicFrequentPattern.maximal.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.periodicFrequentPattern.pyspark.doctree b/sphinx/_build/doctrees/PAMI.periodicFrequentPattern.pyspark.doctree
new file mode 100644
index 000000000..a1dec18a5
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.periodicFrequentPattern.pyspark.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.periodicFrequentPattern.topk.TopkPFP.doctree b/sphinx/_build/doctrees/PAMI.periodicFrequentPattern.topk.TopkPFP.doctree
new file mode 100644
index 000000000..3d407f3dc
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.periodicFrequentPattern.topk.TopkPFP.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.periodicFrequentPattern.topk.doctree b/sphinx/_build/doctrees/PAMI.periodicFrequentPattern.topk.doctree
new file mode 100644
index 000000000..04358ff91
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.periodicFrequentPattern.topk.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.periodicFrequentPattern.topk.kPFPMiner.doctree b/sphinx/_build/doctrees/PAMI.periodicFrequentPattern.topk.kPFPMiner.doctree
new file mode 100644
index 000000000..2329f1f98
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.periodicFrequentPattern.topk.kPFPMiner.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.recurringPattern.basic.doctree b/sphinx/_build/doctrees/PAMI.recurringPattern.basic.doctree
new file mode 100644
index 000000000..722b98d3c
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.recurringPattern.basic.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.recurringPattern.doctree b/sphinx/_build/doctrees/PAMI.recurringPattern.doctree
new file mode 100644
index 000000000..0adff2a16
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.recurringPattern.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.relativeFrequentPattern.basic.doctree b/sphinx/_build/doctrees/PAMI.relativeFrequentPattern.basic.doctree
new file mode 100644
index 000000000..bbd37c830
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.relativeFrequentPattern.basic.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.relativeFrequentPattern.doctree b/sphinx/_build/doctrees/PAMI.relativeFrequentPattern.doctree
new file mode 100644
index 000000000..a88f134bc
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.relativeFrequentPattern.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.relativeHighUtilityPattern.basic.doctree b/sphinx/_build/doctrees/PAMI.relativeHighUtilityPattern.basic.doctree
new file mode 100644
index 000000000..bed1d13be
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.relativeHighUtilityPattern.basic.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.relativeHighUtilityPattern.doctree b/sphinx/_build/doctrees/PAMI.relativeHighUtilityPattern.doctree
new file mode 100644
index 000000000..6e0dd2696
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.relativeHighUtilityPattern.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.sequence.doctree b/sphinx/_build/doctrees/PAMI.sequence.doctree
new file mode 100644
index 000000000..ba13215b4
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.sequence.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.sequentialPatternMining.basic.doctree b/sphinx/_build/doctrees/PAMI.sequentialPatternMining.basic.doctree
new file mode 100644
index 000000000..2808dcd37
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.sequentialPatternMining.basic.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.sequentialPatternMining.closed.doctree b/sphinx/_build/doctrees/PAMI.sequentialPatternMining.closed.doctree
new file mode 100644
index 000000000..069fdb7b4
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.sequentialPatternMining.closed.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.sequentialPatternMining.doctree b/sphinx/_build/doctrees/PAMI.sequentialPatternMining.doctree
new file mode 100644
index 000000000..e1ad0b53e
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.sequentialPatternMining.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.stablePeriodicFrequentPattern.basic.doctree b/sphinx/_build/doctrees/PAMI.stablePeriodicFrequentPattern.basic.doctree
new file mode 100644
index 000000000..58b6ee761
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.stablePeriodicFrequentPattern.basic.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.stablePeriodicFrequentPattern.doctree b/sphinx/_build/doctrees/PAMI.stablePeriodicFrequentPattern.doctree
new file mode 100644
index 000000000..adea7cd6e
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.stablePeriodicFrequentPattern.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.stablePeriodicFrequentPattern.topK.doctree b/sphinx/_build/doctrees/PAMI.stablePeriodicFrequentPattern.topK.doctree
new file mode 100644
index 000000000..e227148f6
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.stablePeriodicFrequentPattern.topK.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.subgraphMining.basic.doctree b/sphinx/_build/doctrees/PAMI.subgraphMining.basic.doctree
new file mode 100644
index 000000000..37807ef3b
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.subgraphMining.basic.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.subgraphMining.doctree b/sphinx/_build/doctrees/PAMI.subgraphMining.doctree
new file mode 100644
index 000000000..af57b72ec
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.subgraphMining.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.subgraphMining.topK.doctree b/sphinx/_build/doctrees/PAMI.subgraphMining.topK.doctree
new file mode 100644
index 000000000..9ea68103a
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.subgraphMining.topK.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.uncertainFaultTolerantFrequentPattern.doctree b/sphinx/_build/doctrees/PAMI.uncertainFaultTolerantFrequentPattern.doctree
new file mode 100644
index 000000000..6d3b69f69
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.uncertainFaultTolerantFrequentPattern.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.uncertainFrequentPattern.basic.doctree b/sphinx/_build/doctrees/PAMI.uncertainFrequentPattern.basic.doctree
new file mode 100644
index 000000000..24d4a81ab
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.uncertainFrequentPattern.basic.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.uncertainFrequentPattern.doctree b/sphinx/_build/doctrees/PAMI.uncertainFrequentPattern.doctree
new file mode 100644
index 000000000..aa5ab9389
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.uncertainFrequentPattern.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.uncertainGeoreferencedFrequentPattern.basic.doctree b/sphinx/_build/doctrees/PAMI.uncertainGeoreferencedFrequentPattern.basic.doctree
new file mode 100644
index 000000000..e45599bb4
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.uncertainGeoreferencedFrequentPattern.basic.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.uncertainGeoreferencedFrequentPattern.doctree b/sphinx/_build/doctrees/PAMI.uncertainGeoreferencedFrequentPattern.doctree
new file mode 100644
index 000000000..5bbfd5385
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.uncertainGeoreferencedFrequentPattern.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.uncertainPeriodicFrequentPattern.basic.doctree b/sphinx/_build/doctrees/PAMI.uncertainPeriodicFrequentPattern.basic.doctree
new file mode 100644
index 000000000..d8f5d6aeb
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.uncertainPeriodicFrequentPattern.basic.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.uncertainPeriodicFrequentPattern.doctree b/sphinx/_build/doctrees/PAMI.uncertainPeriodicFrequentPattern.doctree
new file mode 100644
index 000000000..8feb26493
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.uncertainPeriodicFrequentPattern.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.weightedFrequentNeighbourhoodPattern.basic.doctree b/sphinx/_build/doctrees/PAMI.weightedFrequentNeighbourhoodPattern.basic.doctree
new file mode 100644
index 000000000..a9d631eab
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.weightedFrequentNeighbourhoodPattern.basic.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.weightedFrequentNeighbourhoodPattern.doctree b/sphinx/_build/doctrees/PAMI.weightedFrequentNeighbourhoodPattern.doctree
new file mode 100644
index 000000000..e44c4e34e
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.weightedFrequentNeighbourhoodPattern.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.weightedFrequentPattern.basic.doctree b/sphinx/_build/doctrees/PAMI.weightedFrequentPattern.basic.doctree
new file mode 100644
index 000000000..7b32f106f
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.weightedFrequentPattern.basic.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.weightedFrequentPattern.doctree b/sphinx/_build/doctrees/PAMI.weightedFrequentPattern.doctree
new file mode 100644
index 000000000..609aec512
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.weightedFrequentPattern.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.weightedFrequentRegularPattern.basic.doctree b/sphinx/_build/doctrees/PAMI.weightedFrequentRegularPattern.basic.doctree
new file mode 100644
index 000000000..e428f4f43
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.weightedFrequentRegularPattern.basic.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.weightedFrequentRegularPattern.doctree b/sphinx/_build/doctrees/PAMI.weightedFrequentRegularPattern.doctree
new file mode 100644
index 000000000..420ca2c47
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.weightedFrequentRegularPattern.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.weightedUncertainFrequentPattern.basic.doctree b/sphinx/_build/doctrees/PAMI.weightedUncertainFrequentPattern.basic.doctree
new file mode 100644
index 000000000..b0047b0ee
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.weightedUncertainFrequentPattern.basic.doctree differ
diff --git a/sphinx/_build/doctrees/PAMI.weightedUncertainFrequentPattern.doctree b/sphinx/_build/doctrees/PAMI.weightedUncertainFrequentPattern.doctree
new file mode 100644
index 000000000..9a3866d95
Binary files /dev/null and b/sphinx/_build/doctrees/PAMI.weightedUncertainFrequentPattern.doctree differ
diff --git a/sphinx/_build/doctrees/environment.pickle b/sphinx/_build/doctrees/environment.pickle
new file mode 100644
index 000000000..0007dc851
Binary files /dev/null and b/sphinx/_build/doctrees/environment.pickle differ
diff --git a/sphinx/_build/doctrees/index.doctree b/sphinx/_build/doctrees/index.doctree
new file mode 100644
index 000000000..18a66844b
Binary files /dev/null and b/sphinx/_build/doctrees/index.doctree differ
diff --git a/sphinx/_build/doctrees/modules.doctree b/sphinx/_build/doctrees/modules.doctree
new file mode 100644
index 000000000..f3da8bec5
Binary files /dev/null and b/sphinx/_build/doctrees/modules.doctree differ
diff --git a/sphinx/_build/html/.buildinfo b/sphinx/_build/html/.buildinfo
new file mode 100644
index 000000000..1962979ae
--- /dev/null
+++ b/sphinx/_build/html/.buildinfo
@@ -0,0 +1,4 @@
+# Sphinx build info version 1
+# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
+config: dba6a052199479a76c7239c0416fc44e
+tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/sphinx/_build/html/PAMI.AssociationRules.basic.html b/sphinx/_build/html/PAMI.AssociationRules.basic.html
new file mode 100644
index 000000000..18a1206d7
--- /dev/null
+++ b/sphinx/_build/html/PAMI.AssociationRules.basic.html
@@ -0,0 +1,1046 @@

PAMI.AssociationRules.basic package — PAMI 2024.04.23 documentation

PAMI.AssociationRules.basic package

Submodules

PAMI.AssociationRules.basic.ARWithConfidence module

class PAMI.AssociationRules.basic.ARWithConfidence.ARWithConfidence(iFile, minConf, sep)[source]

    Bases: object

About this algorithm

Description:

    Association rules are derived from frequent patterns using the "confidence" metric.

Reference:

param iFile:

    str :
    Name of the input file to mine the complete set of association rules

param oFile:

    str :
    Name of the output file to store the complete set of association rules

param minConf:

    float :
    The user can specify minConf as a float in the range 0 to 1.

param sep:

    str :
    This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, users can override the default separator.

Attributes:

    startTime : float
        To record the start time of the mining process
    endTime : float
        To record the completion time of the mining process
    finalPatterns : dict
        Storing the complete set of patterns in a dictionary variable
    memoryUSS : float
        To store the total amount of USS memory consumed by the program
    memoryRSS : float
        To store the total amount of RSS memory consumed by the program
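For intuition: the confidence of a rule X → Y is support(X ∪ Y) / support(X), and minConf thresholds exactly this value. A minimal self-contained sketch with toy support counts (the counts are invented for illustration and are not part of the PAMI API):

    # Toy absolute support counts (illustrative only).
    support = {frozenset({'a'}): 4, frozenset({'a', 'b'}): 3}

    def confidence(antecedent: frozenset, itemset: frozenset) -> float:
        # confidence(X -> Y) = support(X U Y) / support(X)
        return support[itemset] / support[antecedent]

    # Rule {a} -> {b}: confidence 3/4 = 0.75, so it survives any minConf <= 0.75.
    print(confidence(frozenset({'a'}), frozenset({'a', 'b'})))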

Execution methods

Terminal command

    Format:

    (.venv) $ python3 ARWithConfidence.py <inputFile> <outputFile> <minConf> <sep>

    Example Usage:

    (.venv) $ python3 ARWithConfidence.py sampleDB.txt patterns.txt 0.5 ' '

Note

    minConf can be specified as a value between 0 and 1.

Calling from a python program

from PAMI.AssociationRules.basic import ARWithConfidence as alg

obj = alg.ARWithConfidence(iFile, minConf)

obj.mine()

associationRules = obj.getPatterns()

print("Total number of Association Rules:", len(associationRules))

obj.save(oFile)

Df = obj.getPatternsAsDataFrame()

memUSS = obj.getMemoryUSS()

print("Total Memory in USS:", memUSS)

memRSS = obj.getMemoryRSS()

print("Total Memory in RSS:", memRSS)

run = obj.getRuntime()

print("Total ExecutionTime in seconds:", run)

Credits

    The complete program was written by P. Likhitha under the supervision of Professor Rage Uday Kiran.

getMemoryRSS()[source]

    Total amount of RSS memory consumed by the mining process will be retrieved from this function

    Returns:
        returning RSS memory consumed by the mining process
    Return type:
        float

getMemoryUSS()[source]

    Total amount of USS memory consumed by the mining process will be retrieved from this function

    Returns:
        returning USS memory consumed by the mining process
    Return type:
        float

getPatterns()[source]

    Function to send the set of frequent patterns after completion of the mining process

    Returns:
        returning frequent patterns
    Return type:
        dict

getPatternsAsDataFrame()[source]

    Storing final frequent patterns in a dataframe

    Returns:
        returning frequent patterns in a dataframe
    Return type:
        pd.DataFrame

getRuntime()[source]

    Calculating the total amount of runtime taken by the mining process

    Returns:
        returning total amount of runtime taken by the mining process
    Return type:
        float

mine()[source]

    Association rule mining process will start from here

printResults()[source]

    Function to send the result after completion of the mining process

save(outFile)[source]

    Complete set of frequent patterns will be loaded into an output file

    Parameters:
        outFile (file) – name of the output file

startMine()[source]

    Association rule mining process will start from here
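After mine() finishes, getPatterns() returns the discovered rules as a dict; a short consumption sketch follows (the exact key/value layout of the dict depends on the PAMI version, so treat this as a sketch rather than the definitive format):

    # Hedged sketch: each entry is assumed to map a rule to its confidence value.
    for rule, strength in obj.getPatterns().items():
        print(rule, strength)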

PAMI.AssociationRules.basic.ARWithLeverage module

class PAMI.AssociationRules.basic.ARWithLeverage.ARWithLeverage(iFile, minConf, sep)[source]

    Bases: object

About this algorithm

Description:

    Association rules are derived from frequent patterns using the "leverage" metric.

Reference:

param iFile:

    str :
    Name of the input file to mine the complete set of association rules

param oFile:

    str :
    Name of the output file to store the complete set of association rules

param minConf:

    float :
    The user can specify minConf as a float in the range 0 to 1.

param sep:

    str :
    This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, users can override the default separator.

Attributes:

    startTime : float
        To record the start time of the mining process
    endTime : float
        To record the completion time of the mining process
    finalPatterns : dict
        Storing the complete set of patterns in a dictionary variable
    memoryUSS : float
        To store the total amount of USS memory consumed by the program
    memoryRSS : float
        To store the total amount of RSS memory consumed by the program
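For intuition: the leverage of X → Y is support(X ∪ Y) − support(X) × support(Y), computed on relative supports; it measures how much more often X and Y co-occur than expected if they were independent. A minimal sketch with toy relative supports (invented for illustration; not the PAMI API):

    # Toy relative supports (fractions of transactions; illustrative only).
    support = {frozenset({'a'}): 0.5, frozenset({'b'}): 0.4, frozenset({'a', 'b'}): 0.3}

    def leverage(x: frozenset, y: frozenset) -> float:
        # leverage(X -> Y) = support(X U Y) - support(X) * support(Y)
        return support[x | y] - support[x] * support[y]

    # 0.3 - 0.5 * 0.4 = 0.10: positive, so {a} and {b} co-occur more than chance.
    print(leverage(frozenset({'a'}), frozenset({'b'})))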

Execution methods

Terminal command

    Format:

    (.venv) $ python3 ARWithLeverage.py <inputFile> <outputFile> <minConf> <sep>

    Example Usage:

    (.venv) $ python3 ARWithLeverage.py sampleDB.txt patterns.txt 0.5 ' '

Note

    minConf can be specified as a value between 0 and 1.

Calling from a python program

from PAMI.AssociationRules.basic import ARWithLeverage as alg

obj = alg.ARWithLeverage(iFile, minConf)

obj.mine()

associationRules = obj.getPatterns()

print("Total number of Association Rules:", len(associationRules))

obj.save(oFile)

Df = obj.getPatternsAsDataFrame()

memUSS = obj.getMemoryUSS()

print("Total Memory in USS:", memUSS)

memRSS = obj.getMemoryRSS()

print("Total Memory in RSS:", memRSS)

run = obj.getRuntime()

print("Total ExecutionTime in seconds:", run)

Credits

    The complete program was written by P. Likhitha under the supervision of Professor Rage Uday Kiran.

getMemoryRSS() → float[source]

    Total amount of RSS memory consumed by the mining process will be retrieved from this function

    Returns:
        returning RSS memory consumed by the mining process
    Return type:
        float

getMemoryUSS() → float[source]

    Total amount of USS memory consumed by the mining process will be retrieved from this function

    Returns:
        returning USS memory consumed by the mining process
    Return type:
        float

getPatterns() → dict[source]

    Function to send the set of frequent patterns after completion of the mining process

    Returns:
        returning frequent patterns
    Return type:
        dict

getPatternsAsDataFrame() → DataFrame[source]

    Storing final frequent patterns in a dataframe

    Returns:
        returning frequent patterns in a dataframe
    Return type:
        pd.DataFrame

getRuntime() → float[source]

    Calculating the total amount of runtime taken by the mining process

    Returns:
        returning total amount of runtime taken by the mining process
    Return type:
        float

mine() → None[source]

    Association rule mining process will start from here

printResults() → None[source]

    Function to send the result after completion of the mining process

save(outFile) → None[source]

    Complete set of frequent patterns will be loaded into an output file

    Parameters:
        outFile (file) – name of the output file
    Returns:
        None

startMine() → None[source]

    Association rule mining process will start from here

PAMI.AssociationRules.basic.ARWithLift module

class PAMI.AssociationRules.basic.ARWithLift.ARWithLift(iFile, minConf, sep)[source]

    Bases: object

About this algorithm

Description:

    Association rules are derived from frequent patterns using the "lift" metric.

Reference:

param iFile:

    str :
    Name of the input file to mine the complete set of association rules

param oFile:

    str :
    Name of the output file to store the complete set of association rules

param minConf:

    float :
    The user can specify minConf as a float in the range 0 to 1.

param sep:

    str :
    This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, users can override the default separator.

Attributes:

    startTime : float
        To record the start time of the mining process
    endTime : float
        To record the completion time of the mining process
    finalPatterns : dict
        Storing the complete set of patterns in a dictionary variable
    memoryUSS : float
        To store the total amount of USS memory consumed by the program
    memoryRSS : float
        To store the total amount of RSS memory consumed by the program
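For intuition: the lift of X → Y is support(X ∪ Y) / (support(X) × support(Y)) on relative supports; lift > 1 indicates positive correlation between X and Y, lift < 1 negative correlation. A minimal sketch with toy relative supports (invented for illustration; not the PAMI API):

    # Toy relative supports (illustrative only).
    support = {frozenset({'a'}): 0.5, frozenset({'b'}): 0.4, frozenset({'a', 'b'}): 0.3}

    def lift(x: frozenset, y: frozenset) -> float:
        # lift(X -> Y) = support(X U Y) / (support(X) * support(Y))
        return support[x | y] / (support[x] * support[y])

    # 0.3 / (0.5 * 0.4) = 1.5 > 1: {a} and {b} are positively correlated.
    print(lift(frozenset({'a'}), frozenset({'b'})))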

Execution methods

Terminal command

    Format:

    (.venv) $ python3 ARWithLift.py <inputFile> <outputFile> <minConf> <sep>

    Example Usage:

    (.venv) $ python3 ARWithLift.py sampleDB.txt patterns.txt 0.5 ' '

Note

    minConf can be specified as a value between 0 and 1.

Calling from a python program

from PAMI.AssociationRules.basic import ARWithLift as alg

obj = alg.ARWithLift(iFile, minConf)

obj.mine()

associationRules = obj.getPatterns()

print("Total number of Association Rules:", len(associationRules))

obj.save(oFile)

Df = obj.getPatternsAsDataFrame()

memUSS = obj.getMemoryUSS()

print("Total Memory in USS:", memUSS)

memRSS = obj.getMemoryRSS()

print("Total Memory in RSS:", memRSS)

run = obj.getRuntime()

print("Total ExecutionTime in seconds:", run)

Credits

    The complete program was written by P. Likhitha under the supervision of Professor Rage Uday Kiran.

getMemoryRSS() → float[source]

    Total amount of RSS memory consumed by the mining process will be retrieved from this function

    Returns:
        returning RSS memory consumed by the mining process
    Return type:
        float

getMemoryUSS() → float[source]

    Total amount of USS memory consumed by the mining process will be retrieved from this function

    Returns:
        returning USS memory consumed by the mining process
    Return type:
        float

getPatterns() → dict[source]

    Function to send the set of frequent patterns after completion of the mining process

    Returns:
        returning frequent patterns
    Return type:
        dict

getPatternsAsDataFrame() → DataFrame[source]

    Storing final frequent patterns in a dataframe

    Returns:
        returning frequent patterns in a dataframe
    Return type:
        pd.DataFrame

getRuntime() → float[source]

    Calculating the total amount of runtime taken by the mining process

    Returns:
        returning total amount of runtime taken by the mining process
    Return type:
        float

mine() → None[source]

    Association rule mining process will start from here

printResults() → None[source]

    Function to send the result after completion of the mining process

save(outFile) → None[source]

    Complete set of frequent patterns will be loaded into an output file

    Parameters:
        outFile (file) – name of the output file
    Returns:
        None

startMine() → None[source]

    Association rule mining process will start from here

class PAMI.AssociationRules.basic.ARWithLift.Lift(patterns, singleItems, minConf)[source]

    Bases: object

    Parameters:

        • patterns (dict) – Dictionary containing patterns and its support value.
        • singleItems (list) – List containing all the single frequent items.
        • minConf (int) – Minimum confidence to mine all the satisfying association rules.

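A hedged sketch of driving this helper directly, using only the documented constructor; whether run() is intended to be called standalone is not stated in these docs, and patterns, singleItems, and minConf are assumed to come from an earlier frequent-pattern mining step:

    # Sketch only: 'patterns' (dict of pattern -> support) and 'singleItems'
    # (list of single frequent items) must be produced beforehand.
    helper = alg.Lift(patterns, singleItems, minConf)
    helper.run()
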
run() → None[source]

    To generate the combinations of all association rules.

PAMI.AssociationRules.basic.RuleMiner module

class PAMI.AssociationRules.basic.RuleMiner.Confidence(patterns, singleItems, threshold)[source]

    Bases: object

    Association rules are derived from frequent patterns using the "confidence" metric.

    run()[source]

        To generate the combinations of all association rules.

class PAMI.AssociationRules.basic.RuleMiner.Leverage(patterns, singleItems, threshold)[source]

    Bases: object

    Association rules are derived from frequent patterns using the "leverage" metric.

    run()[source]

        To generate the combinations of all association rules.

class PAMI.AssociationRules.basic.RuleMiner.Lift(patterns, singleItems, threshold)[source]

    Bases: object

    Association rules are derived from frequent patterns using the "lift" metric.

    run()[source]

        To generate the combinations of all association rules.

class PAMI.AssociationRules.basic.RuleMiner.RuleMiner(iFile, measure, threshold, sep)[source]

    Bases: object

About this algorithm

Description:

    RuleMiner is used to extract association rules from a given set of frequent patterns.

Reference:

param iFile:

    str :
    Name of the input file to mine the complete set of association rules

param oFile:

    str :
    Name of the output file to store the complete set of association rules

param minConf:

    float :
    The user can specify minConf as a float in the range 0 to 1.

param frequentPattern:

    list or dict :
    frequent patterns are stored in the form of a list or dictionary

param measure:

    str :
    condition to calculate the strength of a rule

param threshold:

    int :
    condition to satisfy

param sep:

    str :
    This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, users can override the default separator.

Attributes:

    startTime : float
        To record the start time of the mining process
    endTime : float
        To record the completion time of the mining process
    finalPatterns : dict
        Storing the complete set of patterns in a dictionary variable
    memoryUSS : float
        To store the total amount of USS memory consumed by the program
    memoryRSS : float
        To store the total amount of RSS memory consumed by the program
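Since RuleMiner dispatches on the measure argument, the three strength measures are easy to compare side by side. A self-contained recap on toy relative supports (illustration only; the literal strings accepted by measure are not listed in these docs, so consult the module source):

    # Toy relative supports shared by all three measures (illustrative only).
    sx, sy, sxy = 0.5, 0.4, 0.3  # support(X), support(Y), support(X U Y)

    confidence = sxy / sx         # 0.75 : P(Y | X)
    lift = sxy / (sx * sy)        # 1.5  : co-occurrence vs. independence (ratio)
    leverage = sxy - sx * sy      # 0.10 : co-occurrence vs. independence (difference)

    print(confidence, lift, leverage)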

Execution methods

Terminal command

    Format:

    (.venv) $ python3 RuleMiner.py <inputFile> <outputFile> <minConf> <sep>

    Example Usage:

    (.venv) $ python3 RuleMiner.py sampleDB.txt patterns.txt 0.5 ' '

Note

    minConf can be specified as a value between 0 and 1.

Calling from a python program

from PAMI.AssociationRules.basic import RuleMiner as alg

obj = alg.RuleMiner(iFile, measure, 0.5, "  ")

obj.mine()

associationRules = obj.getPatterns()

print("Total number of Association Rules:", len(associationRules))

obj.save(oFile)

Df = obj.getPatternsAsDataFrame()

memUSS = obj.getMemoryUSS()

print("Total Memory in USS:", memUSS)

memRSS = obj.getMemoryRSS()

print("Total Memory in RSS:", memRSS)

run = obj.getRuntime()

print("Total ExecutionTime in seconds:", run)
Methods:

    mine()

getMemoryRSS()[source]

    Total amount of RSS memory consumed by the mining process will be retrieved from this function

    Returns:
        returning RSS memory consumed by the mining process
    Return type:
        float

getMemoryUSS()[source]

    Total amount of USS memory consumed by the mining process will be retrieved from this function

    Returns:
        returning USS memory consumed by the mining process
    Return type:
        float

getPatterns()[source]

    Function to send the set of frequent patterns after completion of the mining process

    Returns:
        returning frequent patterns
    Return type:
        dict

getPatternsAsDataFrame()[source]

    Storing final frequent patterns in a dataframe

    Returns:
        returning frequent patterns in a dataframe
    Return type:
        pd.DataFrame

getRuntime()[source]

    Calculating the total amount of runtime taken by the mining process

    Returns:
        returning total amount of runtime taken by the mining process
    Return type:
        float

mine()[source]

    Association rule mining process will start from here

printResults()[source]

    Function to send the result after completion of the mining process

save(outFile)[source]

    Complete set of frequent patterns will be loaded into an output file

    Parameters:
        outFile (csv file) – name of the output file

startMine()[source]

    Association rule mining process will start from here

PAMI.AssociationRules.basic.abstract module


Module contents

\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.AssociationRules.html b/sphinx/_build/html/PAMI.AssociationRules.html
new file mode 100644
index 000000000..aeb52398b
--- /dev/null
+++ b/sphinx/_build/html/PAMI.AssociationRules.html
@@ -0,0 +1,255 @@

PAMI.AssociationRules package — PAMI 2024.04.23 documentation

PAMI.AssociationRules package

Subpackages

    PAMI.AssociationRules.basic package

Module contents

\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.correlatedPattern.basic.html b/sphinx/_build/html/PAMI.correlatedPattern.basic.html
new file mode 100644
index 000000000..56352ab09
--- /dev/null
+++ b/sphinx/_build/html/PAMI.correlatedPattern.basic.html
@@ -0,0 +1,630 @@

PAMI.correlatedPattern.basic package — PAMI 2024.04.23 documentation

PAMI.correlatedPattern.basic package

Submodules

PAMI.correlatedPattern.basic.CoMine module

class PAMI.correlatedPattern.basic.CoMine.CoMine(iFile: str | DataFrame, minSup: int | float | str, minAllConf: float, sep: str = '\t')[source]

    Bases: _correlatedPatterns

About this algorithm

Description:

    CoMine is one of the fundamental algorithms for discovering correlated patterns in a transactional database. It is based on the traditional FP-Growth algorithm and uses a depth-first search technique to find all correlated patterns.

Reference:

    Lee, Y.K., Kim, W.Y., Cao, D., Han, J. (2003). CoMine: efficient mining of correlated patterns. In ICDM (pp. 581–584).

param iFile:

    str :
    Name of the input file to mine the complete set of correlated patterns

param oFile:

    str :
    Name of the output file to store the complete set of correlated patterns

param minSup:

    int or float or str :
    The user can specify minSup either as a count or as a proportion of the database size. If the program detects that the data type of minSup is integer, it treats minSup as expressed in count.

param minAllConf:

    float :
    The user can specify minAllConf values within the range (0, 1).

param sep:

    str :
    This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, users can override the default separator.

Attributes:

    memoryUSS : float
        To store the total amount of USS memory consumed by the program
    memoryRSS : float
        To store the total amount of RSS memory consumed by the program
    startTime : float
        To record the start time of the mining process
    endTime : float
        To record the completion time of the mining process
    minSup : int
        The user-given minSup
    minAllConf : float
        The user-given minimum all-confidence ratio (should be in the range of 0 to 1)
    Database : list
        To store the transactions of a database in a list
    mapSupport : dictionary
        To maintain the information of items and their frequency
    lno : int
        It represents the total number of transactions
    tree : class
        It represents the Tree class
    itemSetCount : int
        It represents the total number of patterns
    finalPatterns : dict
        It stores the patterns
    itemSetBuffer : list
        It stores the items during mining
    maxPatternLength : int
        It represents the constraint on pattern length
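For intuition: the all-confidence of a pattern X is support(X) divided by the largest support of any single item in X, so a pattern is reported only when support(X) ≥ minSup and allConf(X) ≥ minAllConf. A minimal sketch on toy counts (invented for illustration; not the PAMI API):

    # Toy absolute support counts (illustrative only).
    item_support = {'a': 8, 'b': 5}
    pattern_support = 4  # support of {a, b}

    # allConf(X) = support(X) / max{support(i) : i in X}
    all_conf = pattern_support / max(item_support.values())

    # 4 / 8 = 0.5: reported only when minAllConf <= 0.5 (and support >= minSup).
    print(all_conf)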

Execution methods

Terminal command

    Format:

    (.venv) $ python3 CoMine.py <inputFile> <outputFile> <minSup> <minAllConf> <sep>

    Example Usage:

    (.venv) $ python3 CoMine.py sampleTDB.txt output.txt 0.25 0.2

Note

    minSup can be specified in support count or as a value between 0 and 1.

Calling from a python program

from PAMI.correlatedPattern.basic import CoMine as alg

obj = alg.CoMine(iFile, minSup, minAllConf, sep)

obj.mine()

patterns = obj.getPatterns()

print("Total number of Patterns:", len(patterns))

obj.save(oFile)

df = obj.getPatternsAsDataFrame()

memUSS = obj.getMemoryUSS()

print("Total Memory in USS:", memUSS)

memRSS = obj.getMemoryRSS()

print("Total Memory in RSS:", memRSS)

run = obj.getRuntime()

print("Total ExecutionTime in seconds:", run)

Credits

    The complete program was written by B. Sai Chitra under the supervision of Professor Rage Uday Kiran.

+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns() Dict[Tuple[int], List[int | float]][source]
+

Function to send the set of correlated patterns after completion of the mining process

+
+
Returns:
+

returning correlated patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame() DataFrame[source]
+

Storing final correlated patterns in a dataframe

+
+
Returns:
+

returning correlated patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine() None[source]
+

Main method to start the mining process

+
+ +
+
+printResults() None[source]
+

function to print the result after completing the process

+
+
Returns:
+

None

+
+
+
+ +
+
+save(outFile) None[source]
+

Complete set of correlated patterns will be saved into an output file

+
+
Parameters:
+

outFile (file) – name of the outputfile

+
+
Returns:
+

None

+
+
+
+ +
+
+startMine() None[source]
+

Main method to start the mining process

+
+ +
+
+ +
+
+

PAMI.correlatedPattern.basic.CoMinePlus module

+
+
+class PAMI.correlatedPattern.basic.CoMinePlus.CoMinePlus(iFile: str | DataFrame, minSup: int | float | str, minAllConf: str, sep: str = '\t')[source]
+

Bases: _correlatedPatterns

+
+

About this algorithm

+
+
Description:
+

CoMinePlus is one of the efficient algorithms for discovering correlated patterns in a transactional database. It uses the item support intervals technique, which generates higher-order correlated patterns by combining an item only with items whose supports lie within a specified interval.

+
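A hedged sketch of the interval idea: since the all-confidence of a pattern can never exceed its minimum item support divided by its maximum item support, an item can only combine with items whose supports fall in the interval below (the helper name is illustrative, not part of the API):

+def supportInterval(itemSup, minAllConf):
+
+    # supports outside this range can never reach minAllConf together with this item
+
+    return (minAllConf * itemSup, itemSup / minAllConf)
+
+print(supportInterval(100, 0.5))  # (50.0, 200.0)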
+
Reference:
+

Uday Kiran R., Kitsuregawa M. (2012) Efficient Discovery of Correlated Patterns in Transactional Databases Using Items’ Support Intervals. +In: Liddle S.W., Schewe KD., Tjoa A.M., Zhou X. (eds) Database and Expert Systems Applications. DEXA 2012. Lecture Notes in Computer Science, vol 7446. Springer, Berlin, Heidelberg. +https://doi.org/10.1007/978-3-642-32600-4_18

+
+
param iFile:
+

str : +Name of the Input file to mine complete set of correlated patterns

+
+
param oFile:
+

str : +Name of the output file to store complete set of correlated patterns

+
+
param minSup:
+

int or float or str : +The user can specify minSup either as a count or as a proportion of the database size. If the program detects that the data type of minSup is integer, it treats minSup as expressed in count.

+
+
param minAllConf:
+

float : +The user can specify minAllConf values within the range (0, 1).

+
+
param sep:
+

str : +This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, users can override the default separator.

+
+
Attributes:
+
+
memoryUSS : float

To store the total amount of USS memory consumed by the program

+
+
memoryRSS : float

To store the total amount of RSS memory consumed by the program

+
+
startTime : float

To record the start time of the mining process

+
+
endTime : float

To record the completion time of the mining process

+
+
minSup : float

The user-given minSup

+
+
minAllConf : float

The user-given minimum all-confidence ratio (should be in the range 0 to 1)

+
+
Database : list

To store the transactions of a database in a list

+
+
mapSupport : Dictionary

To maintain the information of items and their frequencies

+
+
lno : int

It represents the total number of transactions

+
+
tree : class

It represents the Tree class

+
+
itemSetCount : int

It represents the total number of patterns

+
+
finalPatterns : dict

It stores the discovered patterns

+
+
itemSetBuffer : list

It stores the items used during the mining process

+
+
maxPatternLength : int

It represents the constraint on the pattern length

+
+
+
+
+
+
+

Execution methods

+

Terminal command

+
Format:
+
+(.venv) $ python3 CoMinePlus.py <inputFile> <outputFile> <minSup> <minAllConf> <sep>
+
+Example Usage:
+
+(.venv) $ python3 CoMinePlus.py sampleTDB.txt patterns.txt 0.4 0.5 ','
+
+
+
+

Note

+

minSup can be specified in support count or a value between 0 and 1.

+
+

Calling from a python program

+
from PAMI.correlatedPattern.basic import CoMinePlus as alg
+
+obj = alg.CoMinePlus(iFile, minSup, minAllConf, sep)
+
+obj.mine()
+
+correlatedPatterns = obj.getPatterns()
+
+print("Total number of correlated patterns:", len(correlatedPatterns))
+
+obj.save(oFile)
+
+df = obj.getPatternsAsDataFrame()
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+

Credits

+
+

The complete program was written by B.Sai Chitra under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns() Dict[Tuple[str], List[int | float]][source]
+

Function to send the set of correlated patterns after completion of the mining process

+
+
Returns:
+

returning correlated patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame() DataFrame[source]
+

Storing final correlated patterns in a dataframe

+
+
Returns:
+

returning correlated patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine() None[source]
+

Main program to start the operation

+
+ +
+
+printResults() None[source]
+

function to print the result after completing the process

+
+ +
+
+save(outFile: str) None[source]
+

Complete set of correlated patterns will be saved into an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
Returns:
+

None

+
+
+
+ +
+
+startMine() None[source]
+

Main program to start the operation

+
+ +
+
+ +
+
+

PAMI.correlatedPattern.basic.abstract module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.correlatedPattern.html b/sphinx/_build/html/PAMI.correlatedPattern.html new file mode 100644 index 000000000..6241bcbe3 --- /dev/null +++ b/sphinx/_build/html/PAMI.correlatedPattern.html @@ -0,0 +1,209 @@ + + + + + + + PAMI.correlatedPattern package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ + +
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.coveragePattern.basic.html b/sphinx/_build/html/PAMI.coveragePattern.basic.html new file mode 100644 index 000000000..916aaca0c --- /dev/null +++ b/sphinx/_build/html/PAMI.coveragePattern.basic.html @@ -0,0 +1,648 @@ + + + + + + + PAMI.coveragePattern.basic package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.coveragePattern.basic package

+
+

Submodules

+
+
+

PAMI.coveragePattern.basic.CMine module

+
+
+class PAMI.coveragePattern.basic.CMine.CMine(iFile, minRF, minCS, maxOR, sep='\t')[source]
+

Bases: _coveragePatterns

+
+

About this algorithm

+
+
Description:
+

The CMine algorithm aims to discover coverage patterns in transactional databases.

+
+
Reference:
+

Bhargav Sripada, Polepalli Krishna Reddy, Rage Uday Kiran: +Coverage patterns for efficient banner advertisement placement. WWW (Companion Volume) 2011: 131-132 +https://dl.acm.org/doi/10.1145/1963192.1963259

+
+
param iFile:
+

str : +Name of the Input file to mine complete set of coverage patterns

+
+
param oFile:
+

str : +Name of the output file to store complete set of coverage patterns

+
+
param minRF:
+

str: +Controls the minimum number of transactions in which every item must appear in a database.

+
+
param minCS:
+

str: +Controls the minimum number of transactions in which at least one item within a pattern must appear in a database.

+
+
param maxOR:
+

str: +Controls the maximum number of transactions in which any two items within a pattern can appear together.

+
+
param sep:
+

str : +This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, users can override the default separator.

+
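To make these measures concrete, a hedged sketch on a toy tid-set database (the definitions paraphrase the parameters above; the overlap check is omitted for brevity):

+database = [{'a', 'b'}, {'a'}, {'b', 'c'}, {'c'}]  # four toy transactions
+
+def relativeFrequency(item):
+
+    return sum(item in t for t in database) / len(database)
+
+def coverageSupport(pattern):
+
+    # fraction of transactions containing at least one item of the pattern
+
+    return sum(bool(pattern & t) for t in database) / len(database)
+
+print(relativeFrequency('a'), coverageSupport({'a', 'b'}))  # 0.5 0.75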
+
Attributes:
+
+
startTime : float

To record the start time of the mining process

+
+
endTime : float

To record the completion time of the mining process

+
+
finalPatterns : dict

Storing the complete set of patterns in a dictionary variable

+
+
memoryUSS : float

To store the total amount of USS memory consumed by the program

+
+
memoryRSS : float

To store the total amount of RSS memory consumed by the program

+
+
Database : list

To store the transactions of a database in a list

+
+
+
+
+
+
+

Execution methods

+

Terminal command

+
Format:
+
+(.venv) $ python3 CMine.py <inputFile> <outputFile> <minRF> <minCS> <maxOR> <'    '>
+
+Example Usage:
+
+(.venv) $ python3 CMine.py sampleTDB.txt patterns.txt 0.4 0.7 0.5 '       '
+
+
+

Calling from a python program

+
from PAMI.coveragePattern.basic import CMine as alg
+
+obj = alg.CMine(iFile, minRF, minCS, maxOR, separator)
+
+obj.mine()
+
+coveragePattern = obj.getPatterns()
+
+print("Total number of coverage Patterns:", len(coveragePattern))
+
+obj.save(oFile)
+
+Df = obj.getPatternsAsDataFrame()
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+

Credits

+
+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+
+creatingCoverageItems() Dict[str, List[str]][source]
+

This function creates coverage items from _database.

+
+
Returns:
+

coverageTidData that stores coverage items and their tid list.

+
+
Return type:
+

dict

+
+
+
+ +
+
+genPatterns(prefix: Tuple[str, int], tidData: List[Tuple[str, int]]) None[source]
+

This function generates coverage patterns for the given prefix.

+
+
Parameters:
+
    +
  • prefix – String

  • +
  • tidData – list

  • +
+
+
Returns:
+

None

+
+
+
+ +
+
+generateAllPatterns(coverageItems: Dict[str, int]) None[source]
+

This function generates all coverage patterns.

+
+
Parameters:
+

coverageItems – coverage items

+
+
Returns:
+

None

+
+
+
+ +
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns() Dict[str, int][source]
+

Function to send the set of coverage patterns after completion of the mining process

+
+
Returns:
+

returning coverage patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame() DataFrame[source]
+

Storing final coverage patterns in a dataframe

+
+
Returns:
+

returning coverage patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine() None[source]
+

Main method to start

+
+ +
+
+printResults() None[source]
+

This function is used to print the result

+
+ +
+
+save(outFile: str) None[source]
+

Complete set of coverage patterns will be saved into an output file

+
+
Parameters:
+

outFile (file) – name of the outputfile

+
+
Returns:
+

None

+
+
+
+ +
+
+startMine() None[source]
+

Main method to start

+
+ +
+
+tidToBitset(item_set: Dict[str, int]) Dict[str, int][source]
+

This function converts tid list to bitset.

+
+
Parameters:
+

item_set

+
+
Returns:
+

Dictionary

+
+
Return type:
+

dict

+
+
+
+ +
+
+ +
+
+

PAMI.coveragePattern.basic.CPPG module

+
+
+class PAMI.coveragePattern.basic.CPPG.CPPG(iFile, minRF, minCS, maxOR, sep='\t')[source]
+

Bases: _coveragePatterns

+
+
Description:
+

CPPG algorithm discovers coverage patterns in a transactional database.

+
+
Reference:
+

Gowtham Srinivas, P.; Krishna Reddy, P.; Trinath, A. V.; Bhargav, S.; Uday Kiran, R. (2015). +Mining coverage patterns from transactional databases. Journal of Intelligent Information Systems, 45(3), 423–439. +https://link.springer.com/article/10.1007/s10844-014-0318-3

+
+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine complete set of coverage patterns

  • +
  • oFile – str : +Name of the output file to store complete set of coverage patterns

  • +
  • minRF – str: +Controls the minimum number of transactions in which every item must appear in a database.

  • +
  • minCS – str: +Controls the minimum number of transactions in which at least one item within a pattern must appear in a database.

  • +
  • maxOR – str: +Controls the maximum number of transactions in which any two items within a pattern can appear together.

  • +
  • sep – str : +This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, users can override the default separator.

  • +
+
+
Attributes:
+
+
startTime : float

To record the start time of the mining process

+
+
endTime : float

To record the completion time of the mining process

+
+
finalPatterns : dict

Storing the complete set of patterns in a dictionary variable

+
+
memoryUSS : float

To store the total amount of USS memory consumed by the program

+
+
memoryRSS : float

To store the total amount of RSS memory consumed by the program

+
+
Database : list

To store the transactions of a database in a list

+
+
+
+
+
+

Methods to execute code on terminal

+
Format:
+
+(.venv) $ python3 CPPG.py <inputFile> <outputFile> <minRF> <minCS> <maxOR> <'     '>
+
+Example Usage:
+
+(.venv) $ python3 CPPG.py sampleTDB.txt patterns.txt 0.4 0.7 0.5 ','
+
+
+
+

Note

+

minSup will be considered as a percentage of the database transactions

+
+
+
+

Importing this algorithm into a python program

+
from PAMI.coveragePattern.basic import CPPG as alg
+
+obj = alg.CPPG(iFile, minRF, minCS, maxOR)
+
+obj.mine()
+
+coveragePattern = obj.getPatterns()
+
+print("Total number of coverage Patterns:", len(coveragePattern))
+
+obj.save(oFile)
+
+Df = obj.getPatternsAsDataFrame()
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+

Credits:

+
+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns() Dict[str, List[int]][source]
+

Function to send the set of coverage patterns after completion of the mining process

+
+
Returns:
+

returning coverage patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame() DataFrame[source]
+

Storing final coverage patterns in a dataframe

+
+
Returns:
+

returning coverage patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine() None[source]
+

Mining process will start from this function

+
+ +
+
+printResults() None[source]
+

Function used to print the result

+
+ +
+
+save(outFile: str) None[source]
+

Complete set of coverage patterns will be saved into an output file

+
+
Parameters:
+

outFile (file) – name of the outputfile

+
+
+
+ +
+
+startMine() None[source]
+

Mining process will start from this function

+
+ +
+
+ +
+
+

PAMI.coveragePattern.basic.abstract module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.coveragePattern.html b/sphinx/_build/html/PAMI.coveragePattern.html new file mode 100644 index 000000000..a5794ef4f --- /dev/null +++ b/sphinx/_build/html/PAMI.coveragePattern.html @@ -0,0 +1,213 @@ + + + + + + + PAMI.coveragePattern package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ + +
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.extras.DF2DB.html b/sphinx/_build/html/PAMI.extras.DF2DB.html new file mode 100644 index 000000000..2bf11b9b2 --- /dev/null +++ b/sphinx/_build/html/PAMI.extras.DF2DB.html @@ -0,0 +1,802 @@ + + + + + + + PAMI.extras.DF2DB package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.extras.DF2DB package

+
+

Submodules

+
+
+

PAMI.extras.DF2DB.DF2DB module

+
+
+class PAMI.extras.DF2DB.DF2DB.DF2DB(inputDF, thresholdValue, condition, DFtype='sparse')[source]
+

Bases: object

+
+
Description:
+

This class creates a database from a given DataFrame based on the threshold value and condition defined in the class. +It converts a sparse or dense DataFrame into a database.

+
+
Attributes:
+
+
param inputDF:
+

DataFrame : +It is sparse or dense DataFrame

+
+
param thresholdValue:
+

int or float : +It is threshold value of all item

+
+
param condition:
+

str : +It is condition of all item

+
+
param DFtype:
+

str : +It is DataFrame type. It should be sparse or dense. Default DF is sparse.

+
+
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.extras.DF2DB import DF2DB as db
+
+obj = db.DF2DB(idf, 16, ">=", "dense")   # DFtype is "sparse" or "dense"
+
+obj.getTransactionalDatabase("outputFileName") # To create a transactional database
+
+obj.getTemporalDatabase("outputFileName") # To create a temporal database
+
+obj.getUtilityDatabase("outputFileName") # To create a utility database
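For context, a hedged sketch of a dense input DataFrame (the exact column layout expected by the converter is an assumption: a transaction id column followed by one column per item):

+import pandas as pd
+
+idf = pd.DataFrame({'tid': [1, 2, 3], 'apple': [20, 0, 10], 'bread': [5, 16, 0]})
+
+# with thresholdValue=16 and condition ">=", only cells with value >= 16 survive the conversion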
+
+
+
+
+getTemporalDatabase(outputFile) str[source]
+

create temporal database and return outputFile name +:param outputFile: file name or path to store database +:type outputFile: str +:return: outputFile name +:rtype: str

+
+ +
+
+getTransactionalDatabase(outputFile) str[source]
+

create transactional database and return outputFileName +:param outputFile: file name or path to store database +:type outputFile: str +:return: outputFile name +:rtype: str

+
+ +
+
+getUtilityDatabase(outputFile) str[source]
+

create utility database and return outputFile name +:param outputFile: file name or path to store database +:type outputFile: str +:return: outputFile name +:rtype: str

+
+ +
+
+ +
+
+

PAMI.extras.DF2DB.DF2DBPlus module

+
+
+

PAMI.extras.DF2DB.DenseFormatDF module

+
+
+class PAMI.extras.DF2DB.DenseFormatDF.DenseFormatDF(inputDF)[source]
+

Bases: object

+
+
Description:
+

This class creates a database from a DataFrame.

+
+
Attributes:
+
+
param inputDF:
+

dataframe : +It is dense DataFrame

+
+
param condition:
+

str : +It is condition to judge the value in dataframe

+
+
param thresholdValue:
+

int or float : +User defined value.

+
+
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.extras.DF2DB import DenseFormatDF as db
+
+obj = db.DenseFormatDF(idf)
+
+obj.convert2TransactionalDatabase("outputFileName", ">=", 16) # To create a transactional database
+
+obj.convert2TemporalDatabase("outputFileName", ">=", 16) # To create a temporal database
+
+obj.convert2MultipleTimeSeries(interval, "outputFileName", ">=", 16) # To create a multiple time series database
+
+obj.convert2UtilityDatabase("outputFileName") # To create a utility database
+
+obj.getFileName() # To get the file name of the database
+
+
+
+
+convert2MultipleTimeSeries(interval: int, outputFile: str, condition: str, thresholdValue: int | float) None[source]
+
+
Description:
+

Create the multiple time series database.

+
+
Parameters:
+
    +
  • outputFile (str) – Write multiple time series database into outputFile.

  • +
  • interval (int) – Breaks the given timeseries into intervals.

  • +
  • condition – It is condition to judge the value in dataframe

  • +
  • thresholdValue (int or float) – User defined value.

  • +
+
+
+
+ +
+
+convert2TemporalDatabase(outputFile: str, condition: str, thresholdValue: int | float) None[source]
+
+
Description:
+

Create temporal database

+
+
Parameters:
+
    +
  • outputFile (str) – Write temporal database into outputFile

  • +
  • condition (str) – It is condition to judge the value in dataframe

  • +
  • thresholdValue (Union) – User defined value.

  • +
+
+
+
+ +
+
+convert2TransactionalDatabase(outputFile: str, condition: str, thresholdValue: int | float) None[source]
+
+
Description:
+

Create transactional data base

+
+
Attributes:
+
+
param outputFile:
+

Write transactional database into outputFile

+
+
type outputFile:
+

str

+
+
param condition:
+

It is condition to judge the value in dataframe

+
+
type condition:
+

str

+
+
param thresholdValue:
+

User defined value.

+
+
type thresholdValue:
+

Union[int, float]

+
+
+
+
+
+ +
+
+convert2UncertainTransactional(outputFile: str, condition: str, thresholdValue: int | float) None[source]
+
+ +
+
+convert2UtilityDatabase(outputFile: str) None[source]
+
+
Description:
+

Create the utility database.

+
+
Parameters:
+

outputFile (str) – Write utility database into outputFile

+
+
Returns:
+

None

+
+
+
+ +
+
+getFileName() str[source]
+
+
Returns:
+

outputFile name

+
+
Return type:
+

str

+
+
+
+ +
+
+ +
+
+

PAMI.extras.DF2DB.SparseFormatDF module

+
+
+class PAMI.extras.DF2DB.SparseFormatDF.SparseFormatDF(inputDF, condition: str, thresholdValue: float)[source]
+

Bases: object

+
+
Description:
+

This class creates a database from a DataFrame.

+
+
Attributes:
+
+
param inputDF:
+

dataframe : +It is a sparse DataFrame

+
+
param condition:
+

str : +It is condition to judge the value in dataframe

+
+
param thresholdValue:
+

int or float : +User defined value.

+
+
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.extras.DF2DB import SparseFormatDF as db
+
+obj = db.SparseFormatDF(iDdf, ">=", 16)
+
+obj.save(oFile)
+
+obj.createTransactional("outputFileName") # To create transactional database
+
+obj.createTemporal("outputFileName") # To create temporal database
+
+obj.createUtility("outputFileName") # To create utility database
+
+obj.getFileName() # To get the file name of the database
+
+
+
+
+createTemporal(outputFile: str) None[source]
+

Create temporal data base +:param outputFile: Write temporal data base into outputFile +:type outputFile: str +:return: None

+
+ +
+
+createTransactional(outputFile: str) None[source]
+

Create transactional data base +:param outputFile: Write transactional data base into outputFile +:type outputFile: str +:return: None

+
+ +
+
+createUtility(outputFile: str) None[source]
+

Create the utility database. +:param outputFile: Write utility database into outputFile +:type outputFile: str +:return: None

+
+ +
+
+getFileName() str[source]
+
+ +
+
+ +
+
+

PAMI.extras.DF2DB.createTDB module

+
+
+class PAMI.extras.DF2DB.createTDB.createTDB(df, threshold)[source]
+

Bases: object

+
+
Description:
+

This class will create Transactional database.

+
+
Parameters:
+

df (pd.DataFrame) – It represents the dataframe

+
+
+

:param threshold: It is the threshold value of all items. +:type threshold: int or float

+
+

Importing this algorithm into a python program

+
from PAMI.extras.DF2DB import createTDB as ctdb
+
+obj = ctdb.createTDB(idf, threshold)
+
+obj.createTDB()
+
+obj.save(oFile)
+
+
+
+
+createTDB()[source]
+
+
Description:
+

To Create transactional database

+
+
+
+ +
+
+save(outFile)[source]
+

The created transactional database will be saved into an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
+
+ +
+
+ +
+
+

PAMI.extras.DF2DB.denseDF2DBPlus module

+
+
+class PAMI.extras.DF2DB.denseDF2DBPlus.DenseFormatDFPlus(inputDF, thresholdConditionDF)[source]
+

Bases: object

+
+
Description:
+

This class creates a database from a DataFrame.

+
+
Attributes:
+
+
param inputDF:
+

dataframe : +It is dense DataFrame

+
+
param thresholdConditionDF:
+

str or int or float: +It is condition to judge the value in dataframe

+
+
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.extras.DF2DB import DenseFormatDFPlus as db
+
+obj = db.DenseFormatDFPlus(iDdf, thresholdConditionDF)
+
+obj.save(oFile)
+
+obj.createTransactional("outputFileName") # To create transactional database
+
+obj.createTemporal("outputFileName") # To create temporal database
+
+obj.createUtility("outputFileName") # To create utility database
+
+obj.getFileName() # To get the file name of the database
+
+
+
+
+createTemporal(outputFile: str) None[source]
+

Create temporal data base +:param outputFile: Write temporal data base into outputFile +:type outputFile: str +:return: None

+
+ +
+
+createTransactional(outputFile: str) None[source]
+

Create transactional data base +:param outputFile: Write transactional data base into outputFile +:type outputFile: str +:return: None

+
+ +
+
+createUtility(outputFile: str) None[source]
+

Create the utility data base. +:param outputFile: Write utility data base into outputFile +:type outputFile: str +:return: None

+
+ +
+
+getFileName() str[source]
+
+ +
+
+ +
+
+

PAMI.extras.DF2DB.denseDF2DB_dump module

+
+
+class PAMI.extras.DF2DB.denseDF2DB_dump.DenseFormatDF(inputDF, condition: str, thresholdValue: float)[source]
+

Bases: object

+
+
Description:
+

This class creates a database from a DataFrame.

+
+
Attributes:
+
+
param inputDF:
+

dataframe : +It is dense DataFrame

+
+
param condition:
+

str : +It is condition to judge the value in dataframe

+
+
param thresholdValue:
+

int or float : +User defined value.

+
+
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.extras.DF2DB import DenseFormatDF_dump as db
+
+obj = db.DenseFormatDF(iDdf, ">=", 16)
+
+obj.save(oFile)
+
+obj.createTransactional("outputFileName") # To create transactional database
+
+obj.createTemporal("outputFileName") # To create temporal database
+
+obj.createUtility("outputFileName") # To create utility database
+
+obj.getFileName() # To get the file name of the database
+
+
+
+
+createTemporal(outputFile: str) None[source]
+
+
Description:
+

Create temporal data base

+
+
Parameters:
+

outputFile (str) – Write temporal data base into outputFile

+
+
Returns:
+

None

+
+
+
+ +
+
+createTransactional(outputFile: str) None[source]
+
+
Description:
+

Create transactional data base

+
+
Parameters:
+

outputFile (str) – Write transactional data base into outputFile

+
+
Returns:
+

None

+
+
+
+ +
+
+createUtility(outputFile: str) None[source]
+
+
Description:
+

Create the utility database.

+
+
Parameters:
+

outputFile (str) – Write utility database into outputFile

+
+
Returns:
+

None

+
+
+
+ +
+
+getFileName() str[source]
+
+
Returns:
+

outputFile name

+
+
Return type:
+

str

+
+
+
+ +
+
+ +
+
+

PAMI.extras.DF2DB.sparseDF2DBPlus module

+
+
+class PAMI.extras.DF2DB.sparseDF2DBPlus.SparseFormatDFPlus(inputDF, thresholdConditionDF)[source]
+

Bases: object

+
+
Description:
+

This class creates a database from a DataFrame.

+
+
Attributes:
+
+
param inputDF:
+

dataframe : +It is a sparse DataFrame

+
+
param thresholdConditionDF:
+

str : +It is condition to judge the value in dataframe

+
+
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.extras.DF2DB import SparseFormatDFPlus as db
+
+obj = db.SparseFormatDFPlus(iDdf, thresholdConditionDF)
+
+obj.save(oFile)
+
+obj.createTransactional("outputFileName") # To create transactional database
+
+obj.createTemporal("outputFileName") # To create temporal database
+
+obj.createUtility("outputFileName") # To create utility database
+
+obj.getFileName() # To get the file name of the database
+
+
+
+
+createTemporal(outputFile: str) None[source]
+

Create temporal data base +:param outputFile: Write temporal data base into outputFile +:type outputFile: str +:return: None

+
+ +
+
+createTransactional(outputFile: str) None[source]
+

Create transactional data base +:param outputFile: Write transactional data base into outputFile +:type outputFile: str +:return: None

+
+ +
+
+createUtility(outputFile: str) None[source]
+

Create the utility data base. +:param outputFile: Write utility data base into outputFile +:type outputFile: str +:return: None

+
+ +
+
+getFileName() str[source]
+
+ +
+
+ +
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.extras.calculateMISValues.html b/sphinx/_build/html/PAMI.extras.calculateMISValues.html new file mode 100644 index 000000000..c92685ddd --- /dev/null +++ b/sphinx/_build/html/PAMI.extras.calculateMISValues.html @@ -0,0 +1,287 @@ + + + + + + + PAMI.extras.calculateMISValues package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.extras.calculateMISValues package

+
+

Submodules

+
+
+

PAMI.extras.calculateMISValues.usingBeta module

+
+
+class PAMI.extras.calculateMISValues.usingBeta.usingBeta(iFile: str, beta: int, threshold: int, sep: str)[source]
+

Bases: object

+
+
Description:
+

This code is used to calculate the multiple minimum support of items in the given database. The output can be stored in a file or as a dataframe.

+
+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to get the patterns as DataFrame

  • +
  • beta – int : +Multiplier used to compute the minimum support of each item from its frequency

  • +
  • threshold – int : +The user can specify threshold either as a count or as a proportion of the database size. If the program detects that the data type of threshold is integer, it treats threshold as expressed in count.

  • +
  • sep – str : +This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, users can override the default separator.

  • +
+
+
+
+

Importing this algorithm into a python program

+

+
+
+

from PAMI.extras.calculateMISValues import usingBeta as db

+

obj = db.usingBeta(iFile, 3, 16, " ")

+

obj.save(oFile)

+
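A hedged sketch of the usual beta-based multiple minimum support rule (the exact rule implemented here is an assumption; the threshold acts as a floor):

+def mis(itemSupport, beta, threshold):
+
+    # assumed rule: MIS(item) = max(beta * support(item), threshold)
+
+    return max(beta * itemSupport, threshold)
+
+print(mis(40, 0.5, 16), mis(20, 0.5, 16))  # 20.0 16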
+
+calculateMIS() None[source]
+
+ +
+
+getMISDataFrame() DataFrame[source]
+

Storing items and its respective minimum support in a dataframe +:return: returning items and its respective minimum support in a dataframe +:rtype: pd.DataFrame

+
+ +
+
+save(outFile: str) None[source]
+

Complete set of items and their respective minimum support values will be saved into an output file +:param outFile: name of the output file +:type outFile: csv file +:return: None

+
+ +
+
+ +
+
+

PAMI.extras.calculateMISValues.usingSD module

+
+
+class PAMI.extras.calculateMISValues.usingSD.usingSD(iFile: str, threshold: int, sep: str)[source]
+

Bases: object

+
+
Description:
+

This code is used to calculate the multiple minimum support of items in the given database. The output can be stored in a file or as a dataframe.

+
+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine complete set of frequent patterns

  • +
  • sd – int : +SD of items to mine complete set of frequent patterns.

  • +
  • threshold – int : +The user can specify threshold either as a count or as a proportion of the database size. If the program detects that the data type of threshold is integer, it treats threshold as expressed in count.

  • +
  • sep – str : +This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, users can override the default separator.

  • +
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.extras.calculateMISValues import usingSD as db
+
+obj = db.usingSD(iFile, 16, "       ")
+
+obj.getDataFrame() # To get items and their minimum support values in a dataframe
+
+obj.save(oFile)
+
+
+
+
+calculateMIS() None[source]
+
+ +
+
+getDataFrame() DataFrame[source]
+

Storing Items and its respective calculated minimum support values in a dataframe +:return: returning Items and its respective calculated minimum support values in a dataframe +:rtype: pd.DataFrame

+
+ +
+
+save(outFile: str) None[source]
+

Complete items and their respective calculated minimum support values will be saved into an output file +:param outFile: name of the output file +:type outFile: csv file +:return: None

+
+ +
+
+ +
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.extras.dbStats.html b/sphinx/_build/html/PAMI.extras.dbStats.html new file mode 100644 index 000000000..55a1bc574 --- /dev/null +++ b/sphinx/_build/html/PAMI.extras.dbStats.html @@ -0,0 +1,2041 @@ + + + + + + + PAMI.extras.dbStats package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.extras.dbStats package

+
+

Submodules

+
+
+

PAMI.extras.dbStats.FuzzyDatabase module

+
+
+class PAMI.extras.dbStats.FuzzyDatabase.FuzzyDatabase(inputFile: str, sep: str = '\t')[source]
+

Bases: object

+
+
Description:
+

FuzzyDatabase is a class to get stats of a fuzzy database.

+
+
Attributes:
+
+
inputFile : file

input file path

+
+
sep : str

separator in file. Default is tab space.

+
+
+
+
Methods:
+
+
run()

execute readDatabase function

+
+
readDatabase()

read database from input file

+
+
getDatabaseSize()

get the size of database

+
+
getMinimumTransactionLength()

get the minimum transaction length

+
+
getAverageTransactionLength()

get the average transaction length. It is the sum of all transaction lengths divided by the database length.

+
+
getMaximumTransactionLength()

get the maximum transaction length

+
+
getStandardDeviationTransactionLength()

get the standard deviation of transaction length

+
+
getSortedListOfItemFrequencies()

get sorted list of item frequencies

+
+
getSortedListOfTransactionLength()

get sorted list of transaction length

+
+
save(data, outputFile)

store data into outputFile

+
+
getMinimumUtility()

get the minimum utility

+
+
getAverageUtility()

get the average utility

+
+
getMaximumUtility()

get the maximum utility

+
+
getSortedUtilityValuesOfItem()

get sorted utility values of each item

+
+
+
from PAMI.extras.dbStats import FuzzyDatabase as db
+
+obj = db.FuzzyDatabase(iFile, "     ")
+
+obj.run()
+
+obj.printStats()
+
+obj.save(oFile)
+
+
+
+
+
+
+creatingItemSets() None[source]
+

Storing the complete transactions of the database/input file in a database variable

+
+ +
+
+getAverageTransactionLength() float[source]
+

get the average transaction length. It is the sum of all transaction lengths divided by the database length. +:return: average transaction length +:rtype: float

+
+ +
+
+getAverageUtility() float[source]
+

get the average utility +:return: average utility +:rtype: float

+
+ +
+
+getDatabaseSize() int[source]
+

get the size of database +:return: dataset size +:rtype: int

+
+ +
+
+getFrequenciesInRange() dict[source]
+
+ +
+
+getMaximumTransactionLength() int[source]
+

get the maximum transaction length +:return: maximum transaction length +:rtype: int

+
+ +
+
+getMaximumUtility() int[source]
+

get the maximum utility +:return: max utility +:rtype: int

+
+ +
+
+getMinimumTransactionLength() int[source]
+

get the minimum transaction length +:return: minimum transaction length +:rtype: int

+
+ +
+
+getMinimumUtility() int[source]
+

get the minimum utility +:return: min utility +:rtype: int

+
+ +
+
+getNumberOfItems() int[source]
+

get the number of items in database. +:return: number of items +:rtype: int

+
+ +
+
+getSortedListOfItemFrequencies() dict[source]
+

get sorted list of item frequencies +:return: item frequencies +:rtype: dict

+
+ +
+
+getSortedUtilityValuesOfItem() dict[source]
+

get the sorted utility value of each item. The key is the item and the value is its utility. +:return: sorted dictionary of utility values of items +:rtype: dict

+
+ +
+
+getSparsity() float[source]
+

get the sparsity of database +:return: dataset sparsity +:rtype: float

+
+ +
+
+getStandardDeviationTransactionLength() float[source]
+

get the standard deviation transaction length +:return: standard deviation transaction length +:rtype: float

+
+ +
+
+getTotalNumberOfItems() int[source]
+

get the number of items in database. +:return: number of items +:rtype: int

+
+ +
+
+getTotalUtility() int[source]
+

get sum of utility +:return: total utility +:rtype: int

+
+ +
+
+getTransanctionalLengthDistribution() dict[source]
+

get transaction length +:return: transactional length +:rtype: dict

+
+ +
+
+getVarianceTransactionLength() float[source]
+

get the variance transaction length +:return: variance transaction length +:rtype: float

+
+ +
+
+plotGraphs() None[source]
+
+ +
+
+printStats() None[source]
+
+ +
+
+readDatabase() None[source]
+

read database from input file and store into database and size of each transaction.

+
+ +
+
+run() None[source]
+
+ +
+
+save(data: dict, outputFile: str) None[source]
+

store data into outputFile +:param data: input data +:type data: dict +:param outputFile: output file name or path to store +:type outputFile: str +:return: None

+
+ +
+ +
+
+

PAMI.extras.dbStats.MultipleTimeSeriesFuzzyDatabaseStats module

+
+
+class PAMI.extras.dbStats.MultipleTimeSeriesFuzzyDatabaseStats.MultipleTimeSeriesFuzzyDatabaseStats(inputFile: str, sep: str = '\t')[source]
+

Bases: object

+
+
Description:
+

MultipleTimeSeriesFuzzyDatabaseStats is a class to get stats of a multiple time series fuzzy database.

+
+
Attributes:
+
+
param inputFile:
+

file : +input file path

+
+
param sep:
+

str +separator in file. Default is tab space.

+
+
+
+
Methods:
+
+
run()

execute readDatabase function

+
+
readDatabase()

read database from input file

+
+
getDatabaseSize()

get the size of database

+
+
getTotalNumberOfItems()

get the total number of items in a database

+
+
getMinimumTransactionLength()

get the minimum transaction length

+
+
getAverageTransactionLength()

get the average transaction length. It is the sum of all transaction lengths divided by the database length.

+
+
getMaximumTransactionLength()

get the maximum transaction length

+
+
getStandardDeviationTransactionLength()

get the standard deviation of transaction length

+
+
convertDataIntoMatrix()

Convert the database into matrix form to calculate the sparsity and density of a database

+
+
getSparsity()

get sparsity value of database

+
+
getDensity()

get density value of database

+
+
getSortedListOfItemFrequencies()

get sorted list of item frequencies

+
+
getSortedListOfTransactionLength()

get sorted list of transaction length

+
+
save(data, outputFile)

store data into outputFile

+
+
printStats()

To print all the stats of the database

+
+
plotGraphs()

To plot all the graphs of frequency distribution of items and transaction length distribution in the database

+
+
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.extras.dbStats import MultipleTimeSeriesFuzzyDatabaseStats as db
+
+obj = db.MultipleTimeSeriesFuzzyDatabaseStats(iFile, "      ")
+
+obj.run()
+
+obj.save(oFile)
+
+obj.printStats()
+
+
+
+
+convertDataIntoMatrix() ndarray[source]
+
+ +
+
+getAverageTransactionLength() float[source]
+

get the average transaction length. It is the sum of all transaction lengths divided by the database length. +:return: average transaction length +:rtype: float

+
+ +
+
+getDatabaseSize() int[source]
+

get the size of database +:return: dataset size +:rtype: int

+
+ +
+
+getDensity() float[source]
+

get the density of the database. Density is the percentage of non-zero entries in the database. +:return: database density +:rtype: float

+
+ +
+
+getFrequenciesInRange() dict[source]
+
+ +
+
+getMaximumTransactionLength() int[source]
+

get the maximum transaction length +:return: maximum transaction length +:rtype: int

+
+ +
+
+getMinimumTransactionLength() int[source]
+

get the minimum transaction length +:return: minimum transaction length +:rtype: int

+
+ +
+
+getNumberOfItems() int[source]
+

get the number of items in database. +:return: number of items +:rtype: int

+
+ +
+
+getSortedListOfItemFrequencies() dict[source]
+

get sorted list of item frequencies +:return: item frequencies +:rtype: dict

+
+ +
+
+getSparsity() float[source]
+

get the sparsity of the database. Sparsity is the percentage of zero entries in the database. +:return: database sparsity +:rtype: float

+
+ +
+
+getStandardDeviationTransactionLength() float[source]
+

get the standard deviation transaction length +:return: standard deviation transaction length +:rtype: float

+
+ +
+
+getTotalNumberOfItems() int[source]
+

get the number of items in database. +:return: number of items +:rtype: int

+
+ +
+
+getTransanctionalLengthDistribution() dict[source]
+

get transaction length +:return: transactional length +:rtype: dict

+
+ +
+
+getVarianceTransactionLength() float[source]
+

get the variance transaction length +:return: variance transaction length +:rtype: float

+
+ +
+
+plotGraphs() None[source]
+
+ +
+
+printStats() None[source]
+
+ +
+
+readDatabase() None[source]
+

read database from input file and store into database and size of each transaction.

+
+ +
+
+run() None[source]
+
+ +
+
+save(data: dict, outputFile: str) None[source]
+

store data into outputFile +:param data: input data +:type data: dict +:param outputFile: output file name or path to store +:type outputFile: str +:return: None

+
+ +
+
+ +
+
+

PAMI.extras.dbStats.SequentialDatabase module

+
+
+class PAMI.extras.dbStats.SequentialDatabase.SequentialDatabase(inputFile: str, sep: str = '\t')[source]
+

Bases: object

+

SequentialDatabase is a class to get stats of a database, like average, minimum, maximum, and so on.

+
+
Attributes:
+
+
param inputFile:
+

file : +input file path

+
+
param sep:
+

str +separator in file. Default is tab space.

+
+
+
+
Methods:
+
+
readDatabase():

read sequential database from input file and store into database and size of each sequence and subsequences.

+
+
getDatabaseSize(self):

get the size of database

+
+
getTotalNumberOfItems(self):

get the number of items in database.

+
+
getMinimumSequenceLength(self):

get the minimum sequence length

+
+
getAverageSubsequencePerSequenceLength(self):

get the average subsequence length per sequence length. It is the sum of all subsequence lengths divided by the sequence length.

+
+
getAverageItemPerSubsequenceLength(self):

get the average item length per subsequence. It is the sum of all item lengths divided by the subsequence length.

+
+
getMaximumSequenceLength(self):

get the maximum sequence length

+
+
getStandardDeviationSequenceLength(self):

get the standard deviation sequence length

+
+
getVarianceSequenceLength(self):

get the variance Sequence length

+
+
getSequenceSize(self):

get the size of sequence

+
+
getMinimumSubsequenceLength(self):

get the minimum subsequence length

+
+
getAverageItemPerSequenceLength(self):

get the average item length per sequence. It is the sum of all item lengths divided by the sequence length.

+
+
getMaximumSubsequenceLength(self):

get the maximum subsequence length

+
+
getStandardDeviationSubsequenceLength(self):

get the standard deviation subsequence length

+
+
getVarianceSubsequenceLength(self):

get the variance subSequence length

+
+
getSortedListOfItemFrequencies(self):

get sorted list of item frequencies

+
+
getFrequenciesInRange(self):

get sorted list of item frequencies in some range

+
+
getSequencialLengthDistribution(self):

get Sequence length Distribution

+
+
getSubsequencialLengthDistribution(self):

get subSequence length distribution

+
+
printStats(self):

to print all the stats of the sequence database

+
+
plotGraphs(self):

to plot the distribution about items, subsequences in sequence and items in subsequence

+
+
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.extras.dbStats import SequentialDatabase as db
+
+obj = db.SequentialDatabase(iFile, "        ")
+
+obj.save(oFile)
+
+obj.run()
+
+obj.printStats()
+
+
+
+
+

Executing the code on terminal:

+
Format:
+
+(.venv) $ python3 SequentialDatabase.py <inputFile>
+
+Example Usage:
+
+(.venv) $ python3 SequentialDatabase.py sampleDB.txt
+
+
+
+
+

Sample run of the importing code:

+
+

import PAMI.extras.dbStats.SequentialDatabase as alg
+
+_ap = alg.SequentialDatabase(inputFile, sep)
+
+_ap.readDatabase()
+
+_ap.printStats()
+
+_ap.plotGraphs()

+
+
+
+

Credits:

+
+

The complete program was written by Shota Suzuki under the supervision of Professor Rage Uday Kiran.

+
+
+
+getAverageItemPerSequenceLength() float[source]
+

get the average item length per sequence. It is the sum of all item lengths divided by the sequence length. +:return: average item length per sequence +:rtype: float

+
+ +
+
+getAverageItemPerSubsequenceLength() float[source]
+

get the average item length per subsequence. It is the sum of all item lengths divided by the subsequence length. +:return: average item length per subsequence +:rtype: float

+
+ +
+
+getAverageSubsequencePerSequenceLength() float[source]
+

get the average subsequence length per sequence length. It is the sum of all subsequence lengths divided by the sequence length. +:return: average subsequence length per sequence length +:rtype: float

+
+ +
+
+getDatabaseSize() int[source]
+

get the size of database +:return: dataset size +:rtype: int

+
+ +
+
+getFrequenciesInRange() Dict[int, int][source]
+

get sorted list of item frequencies in some range +:return: item separated by its frequencies +:rtype: dict

+
+ +
+
+getMaximumSequenceLength() int[source]
+

get the maximum sequence length +:return: maximum sequence length +:rtype: int

+
+ +
+
+getMaximumSubsequenceLength() int[source]
+

get the maximum subsequence length +:return: maximum subsequence length +:rtype: int

+
+ +
+
+getMinimumSequenceLength() int[source]
+

get the minimum sequence length +:return: minimum sequence length +:rtype: int

+
+ +
+
+getMinimumSubsequenceLength() int[source]
+

get the minimum subsequence length +:return: minimum subsequence length +:rtype: int

+
+ +
+
+getSequenceSize() int[source]
+

get the size of sequence +:return: sequences size +:rtype: int

+
+ +
+
+getSequencialLengthDistribution() Dict[int, int][source]
+

get Sequence length Distribution +:return: Sequence length +:rtype: dict

+
+ +
+
+getSortedListOfItemFrequencies() Dict[str, int][source]
+

get sorted list of item frequencies +:return: item frequencies +:rtype: dict

+
+ +
+
+getStandardDeviationSequenceLength() float[source]
+

get the standard deviation sequence length +:return: standard deviation sequence length +:rtype: float

+
+ +
+
+getStandardDeviationSubsequenceLength() float[source]
+

get the standard deviation subsequence length +:return: standard deviation subsequence length +:rtype: float

+
+ +
+
+getSubsequencialLengthDistribution() Dict[int, int][source]
+

get subSequence length distribution +:return: subSequence length +:rtype: dict

+
+ +
+
+getTotalNumberOfItems() int[source]
+

get the number of items in database. +:return: number of items +:rtype: int

+
+ +
+
+getVarianceSequenceLength() float[source]
+

get the variance Sequence length +:return: variance Sequence length +:rtype: float

+
+ +
+
+getVarianceSubsequenceLength() float[source]
+

get the variance subSequence length +:return: variance subSequence length +:rtype: float

+
+ +
+
+plotGraphs() None[source]
+

To plot the distribution about items, subsequences in sequence and items in subsequence

+
+ +
+
+printStats() None[source]
+

To print all the stats of the sequence database

+
+ +
+
+readDatabase() None[source]
+

read sequential database from input file and store into database and size of each sequence and subsequences.

+
+ +
+
+run() None[source]
+
+ +
+
+ +
+
+

PAMI.extras.dbStats.TemporalDatabase module

+
+
+class PAMI.extras.dbStats.TemporalDatabase.TemporalDatabase(inputFile: str | DataFrame, sep: str = '\t')[source]
+

Bases: object

+
+
Description:
+

TemporalDatabase is a class to get stats of a database.

+
+
Attributes:
+
+
:param inputFile: file

input file path

+
+
:param sep: str

separator in file. Default is tab space.

+
+
+
+
Methods:
+
+
run()

execute readDatabase function

+
+
readDatabase()

read database from input file

+
+
getDatabaseSize()

get the size of database

+
+
getMinimumTransactionLength()

get the minimum transaction length

+
+
getAverageTransactionLength()

get the average transaction length. It is the sum of all transaction lengths divided by the database length.

+
+
getMaximumTransactionLength()

get the maximum transaction length

+
+
getStandardDeviationTransactionLength()

get the standard deviation of transaction length

+
+
getSortedListOfItemFrequencies()

get sorted list of item frequencies

+
+
getSortedListOfTransactionLength()

get sorted list of transaction length

+
+
save(data, outputFile)

store data into outputFile

+
+
getMinimumPeriod()

get the minimum period

+
+
getAveragePeriod()

get the average period

+
+
getMaximumPeriod()

get the maximum period

+
+
getStandardDeviationPeriod()

get the standard deviation period

+
+
getNumberOfTransactionsPerTimestamp()

get number of transactions per time stamp. This time stamp range is 1 to max period.

+
+
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.extras.dbStats import TemporalDatabase as db
+
+obj = db.TemporalDatabase(iFile, "  ")
+
+obj.save(oFile)
+
+obj.run()
+
+obj.printStats()
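For intuition, a hedged sketch of the inter-arrival periods these stats summarize (the timestamps are hypothetical; a period is the gap between consecutive timestamps):

+timestamps = [1, 2, 5, 9]  # hypothetical transaction timestamps
+
+periods = [b - a for a, b in zip(timestamps, timestamps[1:])]  # [1, 3, 4]
+
+print(min(periods), max(periods), sum(periods) / len(periods))  # 1 4 2.666...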
+
+
+
+
+convertDataIntoMatrix() ndarray[source]
+
+ +
+
+getAverageInterArrivalPeriod() float[source]
+

get the average inter-arrival period. It is the sum of all periods divided by the number of periods. +:return: average inter arrival period +:rtype: float

+
+ +
+
+getAveragePeriodOfItem() float[source]
+

get the average period of the item +:return: average period +:rtype: float

+
+ +
+
+getAverageTransactionLength() float[source]
+

get the average transaction length. It is the sum of all transaction lengths divided by the database length. +:return: average transaction length +:rtype: float

+
+ +
+
+getDatabaseSize() int[source]
+

get the size of database +:return: dataset size +:rtype: int

+
+ +
+
+getDensity() float[source]
+

get the density of the database. Density is the percentage of non-zero entries in the database. +:return: database density +:rtype: float

+
+ +
+
+getFrequenciesInRange() Dict[int, int][source]
+
+ +
+
+getMaximumInterArrivalPeriod() int[source]
+

get the maximum inter arrival period +:return: maximum inter arrival period +:rtype: int

+
+ +
+
+getMaximumPeriodOfItem() int[source]
+

get the maximum period of the item +:return: maximum period +:rtype: int

+
+ +
+
+getMaximumTransactionLength() int[source]
+

get the maximum transaction length +:return: maximum transaction length +:rtype: int

+
+ +
+
+getMinimumInterArrivalPeriod() int[source]
+

get the minimum inter arrival period +:return: minimum inter arrival period +:rtype: int

+
+ +
+
+getMinimumPeriodOfItem() int[source]
+

get the minimum period of the item +:return: minimum period +:rtype: int

+
+ +
+
+getMinimumTransactionLength() int[source]
+

get the minimum transaction length +:return: minimum transaction length +:rtype: int

+
+ +
+
+getNumberOfTransactionsPerTimestamp() Dict[int, int][source]
+

get number of transactions per time stamp +:return: number of transactions per time stamp as dict +:rtype: dict

+
+ +
+
+getPeriodsInRange() Dict[int, int][source]
+
+ +
+
+getSortedListOfItemFrequencies() Dict[str, int][source]
+

get sorted list of item frequencies +:return: item frequencies +:rtype: dict

+
+ +
+
+getSparsity() float[source]
+

get the sparsity of the database. Sparsity is the percentage of zero entries in the database. +:return: database sparsity +:rtype: float

+
+ +
+
+getStandardDeviationPeriod() float[source]
+

get the standard deviation period +:return: standard deviation period +:rtype: float

+
+ +
+
+getStandardDeviationTransactionLength() float[source]
+

get the standard deviation transaction length +:return: standard deviation transaction length +:rtype: float

+
+ +
+
+getTotalNumberOfItems() int[source]
+

get the number of items in database. +:return: number of items +:rtype: int

+
+ +
+
+getTransanctionalLengthDistribution() Dict[int, int][source]
+

get transaction length +:return: transactional length +:rtype: dict

+
+ +
+
+getVarianceTransactionLength() float[source]
+

get the variance transaction length +:return: variance transaction length +:rtype: float

+
+ +
+
+plotGraphs() None[source]
+
+ +
+
+printStats() None[source]
+
+ +
+
+readDatabase() None[source]
+

read database from input file and store into database and size of each transaction. +And store the period between transactions as list

+
+ +
+
+run() None[source]
+
+ +
+
+save(data: dict, outputFile: str) None[source]
+

store data into outputFile +:param data: input data +:type data: dict +:param outputFile: output file name or path to store +:type outputFile: str +:return: None

+
+ +
+
+ +
+
+

PAMI.extras.dbStats.TransactionalDatabase module

+
+
+class PAMI.extras.dbStats.TransactionalDatabase.TransactionalDatabase(inputFile: str | DataFrame, sep: str = '\t')[source]
+

Bases: object

+
+
Description:
+

TransactionalDatabase is a class to get stats of a database.

+
+
Attributes:
+
+
param inputFile:
+

file : +input file path

+
+
param sep:
+

str +separator in file. Default is tab space.

+
+
+
+
Methods:
+
+
run()

execute readDatabase function

+
+
readDatabase()

read database from input file

+
+
getDatabaseSize()

get the size of database

+
+
getMinimumTransactionLength()

get the minimum transaction length

+
+
getAverageTransactionLength()

get the average transaction length. It is the sum of all transaction lengths divided by the database length.

+
+
getMaximumTransactionLength()

get the maximum transaction length

+
+
getStandardDeviationTransactionLength()

get the standard deviation of transaction length

+
+
getSortedListOfItemFrequencies()

get sorted list of item frequencies

+
+
getSortedListOfTransactionLength()

get sorted list of transaction length

+
+
save(data, outputFile)

store data into outputFile

+
+
getMinimumPeriod()

get the minimum period

+
+
getAveragePeriod()

get the average period

+
+
getMaximumPeriod()

get the maximum period

+
+
getStandardDeviationPeriod()

get the standard deviation period

+
+
getNumberOfTransactionsPerTimestamp()

get number of transactions per time stamp. This time stamp range is 1 to max period.

+
+
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.extras.dbStats import TransactionalDatabase as db
+
+obj = db.TransactionalDatabase(iFile, "     ")
+
+obj.save(oFile)
+
+obj.run()
+
+obj.printStats()
+
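For intuition, a hedged sketch of the core statistics on a toy tab-separated database (the file format is an assumption: one transaction per line, items separated by the sep character):

+transactions = ["a\tb\tc", "a\tc", "b"]  # toy tab-separated database
+
+lengths = [len(t.split('\t')) for t in transactions]
+
+print(len(transactions))  # database size: 3
+
+print(min(lengths), max(lengths), sum(lengths) / len(lengths))  # 1 3 2.0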
+
+
+
+convertDataIntoMatrix() ndarray[source]
+
+ +
+
+getAverageTransactionLength() float[source]
+

get the average transaction length. It is the sum of all transaction lengths divided by the database length. +:return: average transaction length +:rtype: float

+
+ +
+
+getDatabaseSize() int[source]
+

get the size of database +:return: dataset size +:rtype: int

+
+ +
+
+getDensity() float[source]
+

get the density of the database. Density is the percentage of non-zero entries in the database. +:return: database density +:rtype: float

+
+ +
+
+getFrequenciesInRange() dict[source]
+
+ +
+
+getMaximumTransactionLength() int[source]
+

get the maximum transaction length +:return: maximum transaction length +:rtype: int

+
+ +
+
+getMinimumTransactionLength() int[source]
+

get the minimum transaction length +:return: minimum transaction length +:rtype: int

+
+ +
+
+getNumberOfItems() int[source]
+

get the number of items in database. +:return: number of items +:rtype: int

+
+ +
+
+getSortedListOfItemFrequencies() dict[source]
+

get sorted list of item frequencies +:return: item frequencies +:rtype: dict

+
+ +
+
+getSparsity() float[source]
+

get the sparsity of the database. Sparsity is the percentage of zero entries in the database. +:return: database sparsity +:rtype: float

+
+ +
+
+getStandardDeviationTransactionLength() float[source]
+

get the standard deviation transaction length +:return: standard deviation transaction length +:rtype: float

+
+ +
+
+getTotalNumberOfItems() int[source]
+

get the number of items in database. +:return: number of items +:rtype: int

+
+ +
+
+getTransanctionalLengthDistribution() dict[source]
+

Get the transaction length distribution +:return: a dictionary with transaction lengths as keys and the number of transactions of that length as values +:rtype: dict
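A length distribution of this kind can be sketched with collections.Counter; the transactions list is made up for illustration:

from collections import Counter

transactions = [["a", "b"], ["a"], ["b", "c"]]        # hypothetical database
distribution = Counter(len(t) for t in transactions)  # Counter({2: 2, 1: 1})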

+
+ +
+
+getVarianceTransactionLength() float[source]
+

get the variance transaction length +:return: variance transaction length +:rtype: float

+
+ +
+
+plotGraphs() None[source]
+
+ +
+
+printStats() None[source]
+
+ +
+
+readDatabase() None[source]
+

read the database from the input file and store the transactions and the size of each transaction.

+
+ +
+
+run() None[source]
+
+ +
+
+save(data: dict, outputFile: str) None[source]
+

store data into outputFile +:param data: input data +:type data: dict +:param outputFile: output file name or path to store +:type outputFile: str +:return: None

+
+ +
+
+ +
+
+

PAMI.extras.dbStats.UncertainTemporalDatabase module

+
+
+class PAMI.extras.dbStats.UncertainTemporalDatabase.UncertainTemporalDatabase(inputFile: str, sep: str = '\t')[source]
+

Bases: object

+
+
Description:
+

UncertainTemporalDatabase is a class to get the stats of an uncertain temporal database.

+
+
Attributes:
+
+
:param inputFile: file

input file path

+
+
:param sep: str

separator in file. Default is tab space.

+
+
+
+
Methods:
+
+
run()

execute readDatabase function

+
+
readDatabase()

read database from input file

+
+
getDatabaseSize()

get the size of database

+
+
getMinimumTransactionLength()

get the minimum transaction length

+
+
getAverageTransactionLength()

get the average transaction length. It is sum of all transaction length divided by database length.

+
+
getMaximumTransactionLength()

get the maximum transaction length

+
+
getStandardDeviationTransactionLength()

get the standard deviation of transaction length

+
+
getSortedListOfItemFrequencies()

get sorted list of item frequencies

+
+
getSortedListOfTransactionLength()

get sorted list of transaction length

+
+
save(data, outputFile)

store data into outputFile

+
+
getMinimumPeriod()

get the minimum period

+
+
getAveragePeriod()

get the average period

+
+
getMaximumPeriod()

get the maximum period

+
+
getStandardDeviationPeriod()

get the standard deviation period

+
+
getNumberOfTransactionsPerTimestamp()

get number of transactions per time stamp. This time stamp range is 1 to max period.

+
+
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.extras.dbStats import UncertainTemporalDatabase as db
+
+obj = db.UncertainTemporalDatabase(iFile, " ")
+
+obj.save(oFile)
+
+obj.run()
+
+obj.printStats()
+
+
+
+
+convertDataIntoMatrix() ndarray[source]
+
+ +
+
+getAveragePeriod() float[source]
+

get the average period. It is the sum of all periods divided by the number of periods. +:return: average period +:rtype: float
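Conceptually, the periods are the gaps between consecutive timestamps; a hedged sketch with made-up timestamps:

timestamps = [1, 3, 7, 8]                                      # hypothetical transaction timestamps
periods = [b - a for a, b in zip(timestamps, timestamps[1:])]  # [2, 4, 1]
averagePeriod = sum(periods) / len(periods)                    # 7 / 3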

+
+ +
+
+getAverageTransactionLength() float[source]
+

get the average transaction length. It is sum of all transaction length divided by database length. +:return: average transaction length +:rtype: float

+
+ +
+
+getDatabaseSize() int[source]
+

get the size of database +:return: dataset size +:rtype: int

+
+ +
+
+getDensity() float[source]
+

get the density of the database. Density is the percentage of non-zero entries in the database (i.e., 1 - sparsity). +:return: database density +:rtype: float

+
+ +
+
+getFrequenciesInRange() dict[source]
+
+ +
+
+getMaximumPeriod() int[source]
+

get the maximum period +:return: maximum period +:rtype: int

+
+ +
+
+getMaximumTransactionLength() int[source]
+

get the maximum transaction length +:return: maximum transaction length +:rtype: int

+
+ +
+
+getMinimumPeriod() int[source]
+

get the minimum period +:return: minimum period +:rtype: int

+
+ +
+
+getMinimumTransactionLength() int[source]
+

get the minimum transaction length +:return: minimum transaction length +:rtype: int

+
+ +
+
+getNumberOfTransactionsPerTimestamp() dict[source]
+

get number of transactions per time stamp +:return: number of transactions per time stamp as dict +:rtype: dict

+
+ +
+
+getSortedListOfItemFrequencies() dict[source]
+

get sorted list of item frequencies +:return: item frequencies +:rtype: dict

+
+ +
+
+getSparsity() float[source]
+

get the sparsity of the database. Sparsity is the percentage of zero entries in the database. +:return: database sparsity +:rtype: float

+
+ +
+
+getStandardDeviationPeriod() float[source]
+

get the standard deviation period +:return: standard deviation period +:rtype: float

+
+ +
+
+getStandardDeviationTransactionLength() float[source]
+

get the standard deviation transaction length +:return: standard deviation transaction length +:rtype: float

+
+ +
+
+getTotalNumberOfItems() int[source]
+

get the number of items in database. +:return: number of items +:rtype: int

+
+ +
+
+getTransanctionalLengthDistribution() dict[source]
+

get the transaction length distribution +:return: a dictionary with transaction lengths as keys and their counts as values +:rtype: dict

+
+ +
+
+getVarianceTransactionLength() float[source]
+

get the variance transaction length +:return: variance transaction length +:rtype: float

+
+ +
+
+plotGraphs() None[source]
+
+ +
+
+printStats() None[source]
+
+ +
+
+readDatabase() None[source]
+

read the database from the input file and store the transactions and the size of each transaction. Also store the periods between transactions as a list

+
+ +
+
+run() None[source]
+
+ +
+
+save(data: dict, outputFile: str) None[source]
+

store data into outputFile +:param data: input data +:type data: dict +:param outputFile: output file name or path to store +:type outputFile: str +:return: None

+
+ +
+
+ +
+
+

PAMI.extras.dbStats.UncertainTransactionalDatabase module

+
+
+class PAMI.extras.dbStats.UncertainTransactionalDatabase.UncertainTransactionalDatabase(inputFile: str, sep: str = '\t')[source]
+

Bases: object

+
+
Description:
+

UncertainTransactionalDatabase is a class to get the stats of an uncertain transactional database.

+
+
Attributes:
+
+
inputFile : file

input file path

+
+
sep : str

separator in file. Default is tab space.

+
+
+
+
Methods:
+
+
run()

execute readDatabase function

+
+
readDatabase()

read database from input file

+
+
getDatabaseSize()

get the size of database

+
+
getMinimumTransactionLength()

get the minimum transaction length

+
+
getAverageTransactionLength()

get the average transaction length. It is sum of all transaction length divided by database length.

+
+
getMaximumTransactionLength()

get the maximum transaction length

+
+
getStandardDeviationTransactionLength()

get the standard deviation of transaction length

+
+
getVarianceTransactionLength()

get the variance of transaction length

+
+
getSparsity()

get the sparsity of database

+
+
getSortedListOfItemFrequencies()

get sorted list of item frequencies

+
+
getSortedListOfTransactionLength()

get sorted list of transaction length

+
+
save(data, outputFile)

store data into outputFile

+
+
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.extras.dbStats import UncertainTransactionalDatabase as db
+
+obj = db.UncertainTransactionalDatabase(iFile, "    ")
+
+obj.save(oFile)
+
+obj.run()
+
+obj.printStats()
+
+
+
+
+convertDataIntoMatrix() ndarray[source]
+
+ +
+
+getAverageTransactionLength() float[source]
+

get the average transaction length. It is sum of all transaction length divided by database length. +:return: average transaction length +:rtype: float

+
+ +
+
+getDatabaseSize() int[source]
+

get the size of database +:return: dataset size +:rtype: int

+
+ +
+
+getDensity() float[source]
+

get the density of the database. Density is the percentage of non-zero entries in the database (i.e., 1 - sparsity). +:return: database density +:rtype: float

+
+ +
+
+getFrequenciesInRange() dict[source]
+
+ +
+
+getMaximumTransactionLength() int[source]
+

get the maximum transaction length +:return: maximum transaction length +:rtype: int

+
+ +
+
+getMinimumTransactionLength() int[source]
+

get the minimum transaction length +:return: minimum transaction length +:rtype: int

+
+ +
+
+getNumberOfItems() int[source]
+

get the number of items in database. +:return: number of items +:rtype: int

+
+ +
+
+getSortedListOfItemFrequencies() dict[source]
+

get sorted list of item frequencies +:return: item frequencies +:rtype: dict

+
+ +
+
+getSparsity() float[source]
+

get the sparsity of the database. Sparsity is the percentage of zero entries in the database. +:return: database sparsity +:rtype: float

+
+ +
+
+getStandardDeviationTransactionLength() float[source]
+

get the standard deviation transaction length +:return: standard deviation transaction length +:rtype: float

+
+ +
+
+getTotalNumberOfItems() int[source]
+

get the number of items in database. +:return: number of items +:rtype: int

+
+ +
+
+getTransanctionalLengthDistribution() dict[source]
+

get the transaction length distribution +:return: a dictionary with transaction lengths as keys and their counts as values +:rtype: dict

+
+ +
+
+getVarianceTransactionLength() float[source]
+

get the variance transaction length +:return: variance transaction length +:rtype: float

+
+ +
+
+plotGraphs() None[source]
+
+ +
+
+printStats() None[source]
+
+ +
+
+readDatabase() None[source]
+

read the database from the input file and store the transactions and the size of each transaction.

+
+ +
+
+run() None[source]
+
+ +
+
+save(data: dict, outputFile: str) None[source]
+

store data into outputFile +:param data: input data +:type data: dict +:param outputFile: output file name or path to store +:type outputFile: str +:return: None

+
+ +
+
+ +
+
+

PAMI.extras.dbStats.UtilityDatabase module

+
+
+class PAMI.extras.dbStats.UtilityDatabase.UtilityDatabase(inputFile: str | DataFrame, sep: str = '\t')[source]
+

Bases: object

+
+
Description:
+

UtilityDatabase is a class to get the stats of a utility database.

+
+
Attributes:
+
+
param inputFile:
+

file : +input file path

+
+
param sep:
+

str +separator in file. Default is tab space.

+
+
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.extras.dbStats import UtilityDatabase as db
+
+obj = db.UtilityDatabase(iFile, "   " )
+
+obj.save(oFile)
+
+obj.run()
+
+obj.printStats()
+
+
+
+
+creatingItemSets() None[source]
+

Storing the complete transactions of the database/input file in a database variable

+
+ +
+
+getAverageTransactionLength() float[source]
+

get the average transaction length. It is sum of all transaction length divided by database length. +:return: average transaction length +:rtype: float

+
+ +
+
+getAverageUtility() float[source]
+

get the average utility +:return: average utility +:rtype: float
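Illustratively, for a utility database in which each transaction carries (item, utility) pairs, the average utility is the total utility divided by the number of transactions; a sketch on made-up data:

# hypothetical utility database: each transaction is a list of (item, utility) pairs
database = [[("a", 5), ("b", 3)], [("a", 2)]]
totalUtility = sum(u for tx in database for _, u in tx)  # 10
averageUtility = totalUtility / len(database)            # 5.0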

+
+ +
+
+getDatabaseSize() int[source]
+

get the size of database +:return: size of database +:rtype: int

+
+ +
+
+getFrequenciesInRange() dict[source]
+

This function is used to get the Frequencies in range +:return: Frequencies In Range +:rtype: dict

+
+ +
+
+getMaximumTransactionLength() int[source]
+

get the maximum transaction length +:return: maximum transaction length +:rtype: int

+
+ +
+
+getMaximumUtility() int[source]
+

get the maximum utility +:return: integer value of maximum utility +:rtype: int

+
+ +
+
+getMinimumTransactionLength() int[source]
+

get the minimum transaction length +:return: minimum transaction length +:rtype: int

+
+ +
+
+getMinimumUtility() int[source]
+

get the minimum utility +:return: integer value of minimum utility +:rtype: int

+
+ +
+
+getNumberOfItems() int[source]
+

get the number of items in database. +:return: number of items +:rtype: int

+
+ +
+
+getSortedListOfItemFrequencies() dict[source]
+

get sorted list of item frequencies +:return: item frequencies +:rtype: dict

+
+ +
+
+getSortedUtilityValuesOfItem() dict[source]
+

get sorted utility value each item. key is item and value is utility of item +:return: sorted dictionary utility value of item +:rtype: dict

+
+ +
+
+getSparsity() float[source]
+

get the sparsity of database +:return: sparsity of database in floating values +:rtype: float

+
+ +
+
+getStandardDeviationTransactionLength() float[source]
+

get the standard deviation transaction length +:return: standard deviation transaction length +:rtype: float

+
+ +
+
+getTotalNumberOfItems() int[source]
+

get the number of items in database. +:return: number of items +:rtype: int

+
+ +
+
+getTotalUtility() int[source]
+

get sum of utility +:return: total utility +:rtype: int

+
+ +
+
+getTransanctionalLengthDistribution() dict[source]
+

get transaction length +:return: a dictionary of Transaction Length Distribution +:rtype: dict

+
+ +
+
+getVarianceTransactionLength() float[source]
+

get the variance transaction length +:return: variance transaction length +:rtype: float

+
+ +
+
+plotGraphs() None[source]
+
+ +
+
+printStats() None[source]
+

This function is used to print the results

+
+ +
+
+readDatabase() None[source]
+

read the database from the input file and store the transactions and the size of each transaction.

+
+ +
+
+run() None[source]
+
+ +
+
+save(data, outputFile) None[source]
+

store data into outputFile +:param data: input data +:type data: dict +:param outputFile: output file name or path to store +:type outputFile: str +:return: None

+
+ +
+
+ +
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.extras.fuzzyTransformation.html b/sphinx/_build/html/PAMI.extras.fuzzyTransformation.html
new file mode 100644
index 000000000..531ec1192
--- /dev/null
+++ b/sphinx/_build/html/PAMI.extras.fuzzyTransformation.html
@@ -0,0 +1,268 @@
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.extras.fuzzyTransformation package

+
+

Submodules

+
+
+

PAMI.extras.fuzzyTransformation.abstract module

+
+
+

PAMI.extras.fuzzyTransformation.temporalToFuzzy module

+
+
+class PAMI.extras.fuzzyTransformation.temporalToFuzzy.temporalToFuzzy(iFile: str, fuzFile: str, oFile: str, sep: str = '\t')[source]
+

Bases: _convert

+
+
Description:
+

temporalToFuzzy is used to convert the temporal database into Fuzzy temporal database.

+
+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine complete set of frequent patterns

  • +
  • oFile – str : +Name of the output file to store complete set of frequent patterns

  • +
  • fuzFile – str : +Name of the Fuzzy File to process set of data.

  • +
• sep – str : +This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, users can override the default separator.

  • +
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.extras.FuzzyTransformation import temporalToFuzzy as db
+
+obj = db.temporalToFuzzy(iFile, FuzFile, oFile, "   " )
+
+obj.startConvert()
+
+
+
+
+startConvert() None[source]
+

Main method to convert the temporal database into a fuzzy database.
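As background (not the library's internal code), a fuzzy transformation maps each crisp quantity to membership degrees in fuzzy sets; a minimal sketch of a triangular membership function:

def triangular(x, a, b, c):
    # membership degree of x in a triangular fuzzy set with feet a, c and peak b
    if x <= a or x >= c:
        return 0.0
    return (x - a) / (b - a) if x < b else (c - x) / (c - b)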

+
+ +
+
+ +
+
+

PAMI.extras.fuzzyTransformation.transactionalToFuzzy module

+
+
+class PAMI.extras.fuzzyTransformation.transactionalToFuzzy.transactionalToFuzzy(iFile: str, fuzFile: str, oFile: str, sep: str = '\t')[source]
+

Bases: _convert

+
+
Description:
+

transactionalToFuzzy is used to convert a transactional database into a fuzzy transactional database.

+
+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine complete set of frequent patterns

  • +
  • oFile – str : +Name of the output file to store complete set of frequent patterns

  • +
  • fuzFile – str : +Name of the FuzFile to process set of data.

  • +
• sep – str : +This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, users can override the default separator.

  • +
+
+
Attributes:
+

+
+
+
finalPatterns : dict

Storing the complete set of patterns in a dictionary variable

+
+
+
+

Importing this algorithm into a python program

+
+
from PAMI.extras.FuzzyTransformation import transactionalToFuzzy as db
+
+obj = db.transactionalToFuzzy(iFile, FuzFile, oFile, "     " )
+
+obj.startConvert()
+
+
+
+
+
+startConvert() None[source]
+

Main method to convert the transactional database into a fuzzy database.

+
+ +
+
+ +
+
+

PAMI.extras.fuzzyTransformation.utilityToFuzzy module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.extras.generateDatabase.html b/sphinx/_build/html/PAMI.extras.generateDatabase.html
new file mode 100644
index 000000000..4f58f8ecd
--- /dev/null
+++ b/sphinx/_build/html/PAMI.extras.generateDatabase.html
@@ -0,0 +1,507 @@
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.extras.generateDatabase package

+
+

Submodules

+
+
+

PAMI.extras.generateDatabase.generateSpatioTemporalDatabase module

+
+
+class PAMI.extras.generateDatabase.generateSpatioTemporalDatabase.spatioTemporalDatabaseGenerator(xmin: int, xmax: int, ymin: int, ymax: int, maxTimeStamp: int, numberOfItems: int, itemChanceLow: float, itemChanceHigh: float, timeStampChanceLow: float, timeStampChanceHigh: float)[source]
+

Bases: object

+
+
Description:
+

generateSpatioTemporalDatabase is used to generate a spatio-temporal database.

+
+
Parameters:
+
    +
  • xmin – int : +To give minimum value for x

  • +
  • xmax – int : +To give maximum value for x

  • +
  • ymin – int : +To give minimum value for y

  • +
  • ymax – int : +To give maximum value for y

  • +
  • maxTimeStamp – int : +maximum Time Stamp for the database

  • +
  • numberOfItems – int : +number of items in the database

  • +
  • itemChanceLow – int or float : +least chance for item in the database

  • +
  • itemChanceHigh – int or float : +highest chance for item in the database

  • +
  • timeStampChanceLow – int or float : +lowest time stamp value

  • +
  • timeStampChanceHigh – int or float: +highest time stamp value

  • +
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.extras.generateDatabase import generateSpatioTemporalDatabase as db
+
+obj = db.generateSpatioTemporalDatabase(0, 100, 0, 100, 10, 10, 0.5, 0.9, 0.5, 0.9)
+
+obj.save(oFile)
+
+obj.createPoint(0,100,0,100) # values can be according to the size of data
+
+obj.saveAsFile("outputFileName") # To create a file
+
+
+
+
+alreadyAdded = {}
+
+ +
+
+coinFlip = [True, False]
+
+ +
+
+createPoint(xmin: int, xmax: int, ymin: int, ymax: int) Tuple[int, int][source]
+
+ +
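A plausible sketch of what createPoint does, assuming uniform random sampling inside the bounding box (the sampling strategy is an assumption, not confirmed by the source):

import random

def createPoint(xmin, xmax, ymin, ymax):
    # draw one uniform random integer point inside the bounding box
    return random.randint(xmin, xmax), random.randint(ymin, ymax)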
+
+items = []
+
+ +
+
+outFileName = ''
+
+ +
+
+saveAsFile(outFileName='', sep='\t') None[source]
+
+ +
+
+timestamp = []
+
+ +
+
+ +
+
+

PAMI.extras.generateDatabase.generateTemporalDatabase module

+
+
+class PAMI.extras.generateDatabase.generateTemporalDatabase.generateTemporalDatabase(numOfTransactions: int, avgLenOfTransactions: int, numItems: int, outputFile: str, percentage: int = 50, sep: str = '\t', typeOfFile: str = 'Database')[source]
+

Bases: object

+
+
Description:
+

generateTemporalDatabase creates a temporal database and outputs a database or a dataframe depending on the input

+
+
Attributes:
+
+
param numOfTransactions:
+

int +number of transactions

+
+
param avgLenOfTransactions:
+

int +average length of transactions

+
+
param numItems:
+

int +number of items

+
+
param outputFile:
+

str +output file name

+
+
param percentage:
+

int +percentage of coinToss for TID of temporalDatabase

+
+
param sep:
+

str +separator for database output file

+
+
param typeOfFile:
+

str +specify database or dataframe to get corresponding output

+
+
+
+
Methods:
+
+
getFileName():

returns filename

+
+
createTemporalFile():

creates temporal database file or dataframe

+
+
getDatabaseAsDataFrame:

returns dataframe

+
+
performCoinFlip():

Perform a coin flip with the given probability

+
+
tuning():

Tune the arrayLength to match avgLenOfTransactions

+
+
createTemporalFile():

create Temporal database or dataframe depending on input

+
+
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.extras.generateDatabase import generateTemporalDatabase as db
+
+numOfTransactions = 100
+numItems = 15
+avgTransactionLength = 6
+outFileName = 'temporal_ot.txt'
+sep = '     '
+percent = 75
+frameOrBase = "dataframe" # if you want to get dataframe as output
+frameOrBase = "database" # if you want to get database/csv/file as output
+
+temporalDB = db.generateTemporalDatabase(numOfTransactions, avgTransactionLength, numItems, outFileName, percent, sep, frameOrBase )
+temporalDB.createTemporalFile()
+print(temporalDB.getDatabaseAsDataFrame())
+
+
+
+
+createTemporalFile() None[source]
+

create Temporal database or dataframe depending on input +:return: None

+
+ +
+
+getDatabaseAsDataFrame() DataFrame[source]
+

return dataframe +:return: dataframe +:rtype: pd.DataFrame

+
+ +
+
+getFileName() str[source]
+

return filename +:return: filename +:rtype: str

+
+ +
+
+performCoinFlip(probability: float) bool[source]
+

Perform a coin flip with the given probability. +:param probability: probability of the coin flip returning True +:type probability: float +:return: True with the given probability, False otherwise +:rtype: bool
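A coin flip with a given success probability is typically a single uniform draw; a minimal sketch under that assumption:

import random

def performCoinFlip(probability):
    # True with the given probability, False otherwise
    return random.random() < probability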

+
+ +
+
+tuning(array, sumRes) list[source]
+

Tune the array so that the sum of the values is equal to sumRes

+
+
Parameters:
+
    +
  • array (list) – list of values

  • +
  • sumRes (int) – target sum

  • +
+
+
Returns:
+

list of values with the sum equal to sumRes after tuning

+
+
Return type:
+

list

+
+
+
+ +
+
+ +
+
+

PAMI.extras.generateDatabase.generateTransactionalDatabase module

+
+
+class PAMI.extras.generateDatabase.generateTransactionalDatabase.generateTransactionalDatabase(numLines, avgItemsPerLine, numItems)[source]
+

Bases: object

+

Description: Generate a transactional database with the given number of lines, average number of items per line, and total number of items

+
+
Attributes:
+

+
+
+
numLines: int
    +
  • number of lines

  • +
+
+
avgItemsPerLine: int
    +
  • average number of items per line

  • +
+
+
numItems: int
    +
  • total number of items

  • +
+
+
+
+
Methods:
+
+
create:

Generate the transactional database

+
+
save:

Save the transactional database to a file

+
+
getTransactions:

Get the transactional database

+
+
+
+
+
+
+create() None[source]
+

Generate the transactional database +:return: None

+
+ +
+
+generateArray(nums, avg, maxItems) list[source]
+

Generate a random array of length nums whose values average to avg

+
+
Parameters:
+
    +
  • nums (list) – number of values

  • +
  • avg (float) – average value

  • +
  • maxItems (int) – maximum value

  • +
+
+
Returns:
+

random array

+
+
Return type:
+

list

+
+
+
+ +
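One way to realize such an array is to draw random values and rescale them toward the target average; a sketch under that assumption (not necessarily the library's exact method):

import random

def generateArray(nums, avg, maxItems):
    # draw random lengths, then rescale so the mean is approximately avg
    arr = [random.randint(1, maxItems) for _ in range(nums)]
    scale = (avg * nums) / sum(arr)
    return [max(1, min(maxItems, round(v * scale))) for v in arr]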
+
+getTransactions() DataFrame[source]
+

Get the transactional database

+
+
Returns:
+

the transactional database

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+save(filename) None[source]
+

Save the transactional database to a file

+
+
Parameters:
+

filename (str) – name of the file

+
+
Returns:
+

None

+
+
+
+ +
+
+tuning(array, sumRes) list[source]
+

Tune the array so that the sum of the values is equal to sumRes

+
+
Parameters:
+
    +
  • array (list) – list of values

  • +
  • sumRes (int) – the sum of the values in the array to be tuned

  • +
+
+
Returns:
+

list of values tuned so that the sum of the values equals sumRes

+
+
Return type:
+

list

+
+
+
+ +
+ +
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.extras.graph.html b/sphinx/_build/html/PAMI.extras.graph.html
new file mode 100644
index 000000000..2444a5670
--- /dev/null
+++ b/sphinx/_build/html/PAMI.extras.graph.html
@@ -0,0 +1,396 @@
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.extras.graph package

+
+

Submodules

+
+
+

PAMI.extras.graph.DF2Fig module

+
+
+class PAMI.extras.graph.DF2Fig.DF2Fig(dataFrame: DataFrame)[source]
+

Bases: object

+
+
Description:
+

DF2Fig is used to convert the given dataframe into figures.

+
+
Parameters:
+
    +
  • dataFrame – Name of the input dataframe

  • +
  • algorithm – Specify the column name containing the algorithms

  • +
  • xcolumn – Specify the name of the X-axis

  • +
  • ycolumn – Specify the name of the Y-axis

  • +
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.extras.graph import DF2Fig as fig
+
+obj = fig.DF2Fig(idf)
+
+obj.plot("minSup", "patterns", "algorithms")
+
+obj.plot("minSup", "memory")
+
+obj.plot("minSup", "runtime")
+
+
+
+
+plot(xColumn, yColumn, algorithm=None) None[source]
+

To plot graphs from given dataframe

+
+
Parameters:
+
    +
  • xColumn (str) – Name of the X-axis of the dataframe

  • +
  • yColumn (str) – Name of the Y-axis of the dataframe

  • +
  • algorithm (str) – Specify the column name containing the algorithms

  • +
+
+
Returns:
+

None

+
+
+
+ +
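Conceptually, plot() groups the dataframe by the algorithm column and draws one line per algorithm; a hedged sketch with matplotlib and a made-up dataframe (column names and values are illustrative only):

import pandas as pd
import matplotlib.pyplot as plt

df = pd.DataFrame({"algorithm": ["FPGrowth", "FPGrowth", "ECLAT", "ECLAT"],
                   "minSup": [0.1, 0.2, 0.1, 0.2],
                   "patterns": [120, 60, 118, 59]})
for name, group in df.groupby("algorithm"):   # one line per algorithm
    plt.plot(group["minSup"], group["patterns"], label=name)
plt.xlabel("minSup")
plt.ylabel("patterns")
plt.legend()
plt.show()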
+
+ +
+
+

PAMI.extras.graph.DF2Tex module

+
+
+

PAMI.extras.graph.plotLineGraphFromDictionary module

+
+
+class PAMI.extras.graph.plotLineGraphFromDictionary.plotLineGraphFromDictionary(data: dict, end: int = 100, start: int = 0, title: str = '', xlabel: str = '', ylabel: str = '')[source]
+

Bases: object

+

This class plots a graph of the input data

+
+
Attributes:
+

+
+

:param data : dict: store input data as dict

+
+
Methods:
+
+
plotLineGraph()

draw line graph of input data. input data’s key is x and value is y.

+
+
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.extras.graph import plotLineGraphFromDictionary as plt
+
+obj = plt.plotLineGraphFromDictionary(idict, 100, 0, " ")
+
+obj.save()
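The underlying idea is simply to treat the dictionary keys as x values and the dictionary values as y values; a minimal matplotlib sketch with made-up data:

import matplotlib.pyplot as plt

data = {1: 10, 2: 7, 3: 4}   # hypothetical input dictionary: key is x, value is y
plt.plot(list(data.keys()), list(data.values()))
plt.xlabel("x")
plt.ylabel("y")
plt.show()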
+
+
+
+
+ +
+
+

PAMI.extras.graph.plotLineGraphsFromDataFrame module

+
+
+class PAMI.extras.graph.plotLineGraphsFromDataFrame.plotGraphsFromDataFrame(dataFrame: DataFrame)[source]
+

Bases: object

+

plotGraphsFromDataFrame is used to draw line graphs from the given dataframe.

+
+
Attributes:
+
+
:param dataFrame: DataFrame

store input data as DataFrame

+
+
+
+
Methods:
+
+
plotLineGraphFromDatFrame()

draw line graph of input data. input data’s key is x and value is y.

+
+
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.extras.graph import plotLineGraphsFromDataFrame as plt
+
+obj = plt.plotGraphsFromDataFrame(idf)
+
+obj.save()
+
+
+
+
+plotGraphsFromDataFrame() None[source]
+
+ +
+
+ +
+
+

PAMI.extras.graph.visualizeFuzzyPatterns module

+
+
+class PAMI.extras.graph.visualizeFuzzyPatterns.visualizeFuzzyPatterns(file: str, topk: int)[source]
+

Bases: object

+
+
Description:
+

visualizeFuzzyPatterns is used to visualize points produced by a pattern miner.

+
+
Attributes:
+

+
+

:param file : file: store input data as file +:param topk : int: the number of top patterns to visualize

+
+
from PAMI.extras.graph import visualizeFuzzyPatterns as viz
+
+obj = viz.visualizeFuzzyPatterns(iFile, topk)
+
+obj.save()
+
+
+
+
+
+visualize(markerSize: int = 20, zoom: int = 3, width: int = 1500, height: int = 1000) None[source]
+

Visualize points produced by pattern miner.

+
+
Parameters:
+
    +
  • markerSize (int) – Size of the marker

  • +
  • zoom (int) – Zoom level

  • +
  • width (int) – Width of the graph

  • +
  • height – Height of the graph on the screen

  • +
+
+
Returns:
+

None

+
+
+
+ +
+ +
+
+

PAMI.extras.graph.visualizePatterns module

+
+
+class PAMI.extras.graph.visualizePatterns.visualizePatterns(file: str, topk: int)[source]
+

Bases: object

+
+
Description:
+

visualizePatterns is used to visualize points produced by a pattern miner.

+
+
Attributes:
+
+
:param file: file

store input data as file

+
+
:param topk: int

The number of top patterns to visualize

+
+
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.extras.graph import visualizePatterns as viz
+
+obj = viz.visualizePatterns(iFile, topk)
+
+obj.save()
+
+
+
+
+visualize(markerSize: int = 20, zoom: int = 3, width: int = 1500, height: int = 1000) None[source]
+

Visualize points produced by pattern miner.

+
+
Parameters:
+
    +
  • markerSize (int) – Size of the marker

  • +
  • zoom (int) – Zoom level

  • +
  • width (int) – Width of the graph

  • +
  • height – Height of the graph on the screen

  • +
+
+
Returns:
+

None

+
+
+
+ +
+
+ +
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.extras.html b/sphinx/_build/html/PAMI.extras.html
new file mode 100644
index 000000000..818655401
--- /dev/null
+++ b/sphinx/_build/html/PAMI.extras.html
@@ -0,0 +1,1252 @@
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.extras package

+
+

Subpackages

+
+ +
+
+
+

Submodules

+
+
+

PAMI.extras.convertMultiTSIntoFuzzy module

+
+
+

PAMI.extras.generateLatexGraphFile module

+
+
+PAMI.extras.generateLatexGraphFile.generateLatexCode(result: DataFrame) None[source]
+
+ +
+
+class PAMI.extras.generateLatexGraphFile.generateLatexGraphFile[source]
+

Bases: object

+
+
Description:
+

generateLatexGraphFile is used to convert the given data into a LaTeX graph file.

+
+
+
+

Importing this algorithm into a python program

+
from PAMI.extras.syntheticDataGenerator import generateLatexGraphFile as fuz
+
+obj = fuz.generateLatexGraphFile(idf)
+
+obj.save()
+
+
+
+
+ +
+
+

PAMI.extras.plotPointOnMap module

+
+
+class PAMI.extras.plotPointOnMap.plotPointOnMap(inputPatterns: str, k: int = 10, sep: str = '\t')[source]
+

Bases: object

+
+
Description:
+

plotPointOnMap is used to take the input patterns and plot the points on a map

+
+
Parameters:
+
    +
  • inputPatterns – str : +Name of the Input file

  • +
• k – int : +The number of top patterns to plot. The default is 10.

  • +
• sep – str : +This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, users can override the default separator.

  • +
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.extras.syntheticDataGenerator import plotPointOnMap as plt
+
+obj = plt.plotPointOnMap(" ", 10, "     ")
+
+obj.save()
+
+
+
+
+convertPOINT(patterns: List[List[str]]) DataFrame[source]
+
+ +
+
+findTopKPatterns() List[List[str]][source]
+
+ +
+
+plotPointInMap() Map[source]
+
+ +
+
+ +
+
+

PAMI.extras.plotPointOnMap_dump module

+
+
+class PAMI.extras.plotPointOnMap_dump.plotPointOnMap(inputPatterns: str, k: int = 10, sep: str = '\t')[source]
+

Bases: object

+
+
Description:
+

plotPointOnMap is used to take the input patterns and plot the points on a map

+
+
Parameters:
+
    +
  • inputPatterns – str : Name of the Input file

  • +
• k – int : The number of top patterns to plot. The default is 10.

  • +
• sep – str : This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, users can override the default separator.

  • +
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.extras.syntheticDataGenerator import plotPointOnMap as plt
+
+obj = plt.plotPointOnMap(" ", 10, "     ")
+
+obj.save()
+
+
+
+
+convertPOINT(patterns: List[List[str]]) DataFrame[source]
+
+ +
+
+findTopKPatterns() List[List[str]][source]
+
+ +
+
+plotPointInMap() Map[source]
+
+ +
+
+ +
+
+

PAMI.extras.scatterPlotSpatialPoints module

+
+
+class PAMI.extras.scatterPlotSpatialPoints.scatterPlotSpatialPoints(iFile: str, sep: str = '\t')[source]
+

Bases: object

+
+
Description:
+

scatterPlotSpatialPoints is used to read the given data and scatter-plot the spatial points.

+
+
Parameters:
+
    +
  • iFile – str : Name of the Input file

  • +
• sep – str : This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, users can override the default separator.

  • +
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.extras.syntheticDataGenerator import scatterPlotSpatialPoints as plt
+
+obj = plt.scatterPlotSpatialPoints(iFile, "     " )
+
+obj.save(oFile)
+
+
+
+
+scatterPlotSpatialPoints() None[source]
+
+ +
+
+ +
+
+

PAMI.extras.topKPatterns module

+
+
+class PAMI.extras.topKPatterns.topKPatterns(inputFile: str, k: int = 10, sep: str = '\t')[source]
+

Bases: object

+
+
Description:
+

find the top-k length patterns in the input file.

+
+
Attributes:
+
+
inputFile : str

input file name or path

+
+
k : int

rank of pattern length. default is 10

+
+
sep : str

separator which separate patterns in input file. default is tab space

+
+
+
+
Methods:
+
+
getTopKPatterns()

return top k patterns as dict

+
+
storeTopKPatterns(outputFile)

store top k patterns into output file.

+
+
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.extras.syntheticDataGenerator import topKPatterns as plt
+
+obj = plt.topKPatterns(" ", 10, "   ")
+
+obj.save()
+
+
+
+
+getTopKPatterns() dict[source]
+

get the top-k length patterns. The user can define the k value.

+
+
Returns:
+

top k length patterns as dictionary. top k patterns = {patternId: pattern}

+
+
+
+ +
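Illustratively, selecting the top-k patterns by length reduces to a sort; a sketch on a made-up pattern dictionary:

patterns = {1: ["a"], 2: ["a", "b", "c"], 3: ["b", "c"]}   # hypothetical patternId -> pattern
k = 2
topK = dict(sorted(patterns.items(), key=lambda kv: len(kv[1]), reverse=True)[:k])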
+
+save(outputFile: str) None[source]
+

store the top-k length patterns into a file. The user can define the k value.

+
+
Parameters:
+

outputFile (str) – output file name or path

+
+
+
+ +
+
+ +
+
+

PAMI.extras.uncertaindb_convert module

+
+
+class PAMI.extras.uncertaindb_convert.predictedClass2Transaction(predicted_classes: list, minThreshold: float = 0.8)[source]
+

Bases: object

+
+
Description:
+

This is used to convert the predicted classes of the given database into a transaction.

+
+
Parameters:
+
    +
• predicted_classes – list : the predicted classes to be converted

  • +
  • minThreshold – int or float : minimum threshold User defined value.

  • +
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.extras.syntheticDataGenerator import uncertaindb_convert as un
+
+obj = un.predictedClass2Transaction(predicted_classes, 0.8)
+
+obj.save(oFile)
+
+
+
+
+getBinaryTransaction(predicted_classes: list, minThreshold: float = 0.8) dict[source]
+
+ +
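A plausible reading of getBinaryTransaction is that classes whose predicted probability meets the threshold are kept; a sketch under that assumption, with made-up predictions:

predicted = [("cat", 0.92), ("dog", 0.40), ("car", 0.85)]  # hypothetical (class, probability) pairs
minThreshold = 0.8
transaction = {label: prob for label, prob in predicted if prob >= minThreshold}  # keeps cat and car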
+
+ +
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.extras.image2Database.html b/sphinx/_build/html/PAMI.extras.image2Database.html
new file mode 100644
index 000000000..462202059
--- /dev/null
+++ b/sphinx/_build/html/PAMI.extras.image2Database.html
@@ -0,0 +1,167 @@
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.extras.image2Database package

+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.extras.imageProcessing.html b/sphinx/_build/html/PAMI.extras.imageProcessing.html
new file mode 100644
index 000000000..eccf9c194
--- /dev/null
+++ b/sphinx/_build/html/PAMI.extras.imageProcessing.html
@@ -0,0 +1,237 @@
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.extras.imageProcessing package

+
+

Submodules

+
+
+

PAMI.extras.imageProcessing.imagery2Databases module

+
+
+class PAMI.extras.imageProcessing.imagery2Databases.createDatabase(detected_objects: list, threshold: float)[source]
+

Bases: object

+
+
Description:
+

imagery2Databases is used to create a transactional database from detected objects by applying a threshold

+
+
Parameters:
+
    +
  • detected_objects – list : +List data to be processed

  • +
  • threshold – int : +It is threshold value of all item

  • +
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.extras.imageProcessing import imagery2Databases as db
+
+obj = db.createDatabase(detected_obj, 16)
+
+obj.save(oFile)
+
+
+
+
+getDataFrame() DataFrame[source]
+
+ +
+
+saveAsTemporalDB(outputFile: str, sep: str)[source]
+
+ +
+
+saveAsTransactionalDB(outputFile: str, sep: str) None[source]
+
+ +
+
+saveAsUncertainTemporalDB(outputFile: str, sep: str) None[source]
+
+ +
+
+saveAsUncertainTransactionalDB(outputFile: str, sep: str) None[source]
+
+ +
+
+saveAsUtilityTemporalDB(outputFile: str, sep: str) None[source]
+
+ +
+
+saveAsUtilityTransactionalDB(outputFile: str, sep: str) None[source]
+
+ +
+
+ +
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.extras.messaging.html b/sphinx/_build/html/PAMI.extras.messaging.html
new file mode 100644
index 000000000..96b7a6b80
--- /dev/null
+++ b/sphinx/_build/html/PAMI.extras.messaging.html
@@ -0,0 +1,198 @@
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.extras.messaging package

+
+

Submodules

+
+
+

PAMI.extras.messaging.discord module

+
+
+class PAMI.extras.messaging.discord.discord(url: str)[source]
+

Bases: object

+
+
+send(message: str) None[source]
+
+ +
+ +
+
+

PAMI.extras.messaging.gmail module

+
+
+class PAMI.extras.messaging.gmail.gmail(userName: str, password: str)[source]
+

Bases: object

+
+
+send(toAddress: str, subject: str, body: str) None[source]
+
+ +
+ +
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.extras.neighbours.html b/sphinx/_build/html/PAMI.extras.neighbours.html
new file mode 100644
index 000000000..a272585fd
--- /dev/null
+++ b/sphinx/_build/html/PAMI.extras.neighbours.html
@@ -0,0 +1,329 @@
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.extras.neighbours package

+
+

Submodules

+
+
+

PAMI.extras.neighbours.findNeighborsUsingEuclideanDistanceforPointInfo module

+
+
+class PAMI.extras.neighbours.findNeighborsUsingEuclideanDistanceforPointInfo.createNeighborhoodFileUsingEuclideanDistance(iFile: str, oFile: str, maxEucledianDistance: int, seperator='\t')[source]
+

Bases: object

+

This class creates a neighbourhood file using the Euclidean distance.

+
+
Attribute:
+
+
:param iFilefile

Input file name or path of the input file

+
+
:param oFilefile

Output file name or path of the output file

+
+
:param maxEuclideanDistanceint

The user can specify maxEuclideanDistance. +This program finds pairs of values whose Euclidean distance is less than or equal to maxEuclideanDistance +and stores the pairs.

+
+
+
+
param seperator:
+

str : +This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, users can override the default separator.

+
+
+
+
Methods:
+
+
mine()

find and store the pairs of values whose Euclidean distance is less than or equal to maxEuclideanDistance.

+
+
getFileName()

This function returns output file name.

+
+
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.extras.neighbours import findNeighboursUsingEuclidean as db
+
+obj = db.findNeighboursUsingEuclidean(iFile, oFile, 10, "   ")
+
+obj.save()
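The core of the computation is a pairwise distance filter; a minimal sketch with made-up points (math.dist requires Python 3.8+):

import math

points = {"p1": (0.0, 0.0), "p2": (3.0, 4.0), "p3": (50.0, 50.0)}  # hypothetical point coordinates
maxEuclideanDistance = 5.0
neighbours = {}
for a in points:
    for b in points:
        if a != b and math.dist(points[a], points[b]) <= maxEuclideanDistance:
            neighbours.setdefault(a, []).append(b)  # p1 and p2 become neighbours of each other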
+
+
+
+
+getFileName() str[source]
+
+ +
+
+ +
+
+

PAMI.extras.neighbours.findNeighboursUsingEuclidean module

+
+
+class PAMI.extras.neighbours.findNeighboursUsingEuclidean.createNeighborhoodFileUsingEuclideanDistance(iFile: str, oFile: str, maxEucledianDistance: int, seperator='\t')[source]
+

Bases: object

+

This class creates a neighbourhood file using the Euclidean distance.

+
+
Attribute:
+
+
:param iFilefile

Input file name or path of the input file

+
+
:param oFilefile

Output file name or path of the output file

+
+
:param maxEuclideanDistanceint

The user can specify maxEuclideanDistance. +This program finds pairs of values whose Euclidean distance is less than or equal to maxEuclideanDistance +and stores the pairs.

+
+
+
+
param seperator:
+

str : +This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, users can override the default separator.

+
+
+
+
Methods:
+
+
mine()

find and store the pairs of values whose Euclidean distance is less than or equal to maxEuclideanDistance.

+
+
getFileName()

This function returns output file name.

+
+
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.extras.neighbours import findNeighboursUsingEuclidean as db
+
+obj = db.findNeighboursUsingEuclidean(iFile, oFile, 10, "   ")
+
+obj.save()
+
+
+
+
+getFileName() str[source]
+
+ +
+
+ +
+
+

PAMI.extras.neighbours.findNeighboursUsingGeodesic module

+
+
+class PAMI.extras.neighbours.findNeighboursUsingGeodesic.createNeighborhoodFileUsingGeodesicDistance(iFile: str, oFile: str, maxDistance: float, seperator='\t')[source]
+

Bases: object

+

This class creates a neighbourhood file using the geodesic distance.

+
+
Attribute:
+
+
:param iFilefile

Input file name or path of the input file

+
+
:param oFilefile

Output file name or path of the output file

+
+
:param maxDistancefloat

The user can specify maxDistance in km (kilometers). +This program finds pairs of values whose geodesic distance is less than or equal to maxDistance +and stores the pairs.

+
+
+
+
param seperator:
+

str : +This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, users can override the default separator.

+
+
+
+
Methods:
+
+
mine()

find and store the pairs of values whose geodesic distance is less than or equal to maxDistance.

+
+
getFileName()

This function returns output file name.

+
+
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.extras.neighbours import findNeighboursUsingGeodesic as db
+
+ obj = db.findNeighboursUsingGeodesic(iFile, oFile, 10, "   ")
+
+obj.save()
+
+
+
+
+getFileName()[source]
+
+ +
+
+ +
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.extras.sampleDatasets.html b/sphinx/_build/html/PAMI.extras.sampleDatasets.html
new file mode 100644
index 000000000..655f18f59
--- /dev/null
+++ b/sphinx/_build/html/PAMI.extras.sampleDatasets.html
@@ -0,0 +1,167 @@
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.extras.sampleDatasets package

+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.extras.stats.html b/sphinx/_build/html/PAMI.extras.stats.html
new file mode 100644
index 000000000..76d12c86b
--- /dev/null
+++ b/sphinx/_build/html/PAMI.extras.stats.html
@@ -0,0 +1,1170 @@
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.extras.stats package

+
+

Submodules

+
+
+

PAMI.extras.stats.TransactionalDatabase module

+
+
+class PAMI.extras.stats.TransactionalDatabase.TransactionalDatabase(inputFile: str | DataFrame, sep: str = '\t')[source]
+

Bases: object

+
+
Description:
+

TransactionalDatabase is a class to get the stats of a transactional database.

+
+
Attributes:
+
+
param inputFile:
+

file : +input file path

+
+
param sep:
+

str +separator in file. Default is tab space.

+
+
+
+
Methods:
+
+
run()

execute readDatabase function

+
+
readDatabase()

read database from input file

+
+
getDatabaseSize()

get the size of database

+
+
getMinimumTransactionLength()

get the minimum transaction length

+
+
getAverageTransactionLength()

get the average transaction length. It is sum of all transaction length divided by database length.

+
+
getMaximumTransactionLength()

get the maximum transaction length

+
+
getStandardDeviationTransactionLength()

get the standard deviation of transaction length

+
+
getSortedListOfItemFrequencies()

get sorted list of item frequencies

+
+
getSortedListOfTransactionLength()

get sorted list of transaction length

+
+
save(data, outputFile)

store data into outputFile

+
+
getMinimumPeriod()

get the minimum period

+
+
getAveragePeriod()

get the average period

+
+
getMaximumPeriod()

get the maximum period

+
+
getStandardDeviationPeriod()

get the standard deviation period

+
+
getNumberOfTransactionsPerTimestamp()

get number of transactions per time stamp. This time stamp range is 1 to max period.

+
+
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.extras.stats import TransactionalDatabase as db
+
+obj = db.TransactionalDatabase(iFile, "     ")
+
+obj.save(oFile)
+
+obj.run()
+
+obj.printStats()
+
+
+
+
+convertDataIntoMatrix() ndarray[source]
+
+ +
+
+getAverageTransactionLength() float[source]
+

get the average transaction length. It is sum of all transaction length divided by database length. +:return: average transaction length +:rtype: float

+
+ +
+
+getDatabaseSize() int[source]
+

get the size of database +:return: dataset size +:rtype: int

+
+ +
+
+getDensity() float[source]
+

get the density of the database. Density is the percentage of non-zero entries in the database (i.e., 1 - sparsity). +:return: database density +:rtype: float

+
+ +
+
+getFrequenciesInRange() dict[source]
+
+ +
+
+getMaximumTransactionLength() int[source]
+

get the maximum transaction length +:return: maximum transaction length +:rtype: int

+
+ +
+
+getMinimumTransactionLength() int[source]
+

get the minimum transaction length +:return: minimum transaction length +:rtype: int

+
+ +
+
+getNumberOfItems() int[source]
+

get the number of items in database. +:return: number of items +:rtype: int

+
+ +
+
+getSortedListOfItemFrequencies() dict[source]
+

get sorted list of item frequencies +:return: item frequencies +:rtype: dict

+
+ +
+
+getSparsity() float[source]
+

get the sparsity of the database. Sparsity is the percentage of zero entries in the database. +:return: database sparsity +:rtype: float

+
+ +
+
+getStandardDeviationTransactionLength() float[source]
+

get the standard deviation transaction length +:return: standard deviation transaction length +:rtype: float

+
+ +
+
+getTotalNumberOfItems() int[source]
+

get the number of items in database. +:return: number of items +:rtype: int

+
+ +
+
+getTransanctionalLengthDistribution() dict[source]
+

Get the transaction length distribution +:return: a dictionary with transaction lengths as keys and their counts as values +:rtype: dict

+
+ +
+
+getVarianceTransactionLength() float[source]
+

get the variance transaction length +:return: variance transaction length +:rtype: float

+
+ +
+
+plotGraphs() None[source]
+
+ +
+
+printStats() None[source]
+
+ +
+
+run() None[source]
+

read the database from the input file and store the transactions and the size of each transaction.

+
+ +
+
+save(data: dict, outputFile: str) None[source]
+

store data into outputFile +:param data: input data +:type data: dict +:param outputFile: output file name or path to store +:type outputFile: str +:return: None

+
+ +
+
+ +
+
+

PAMI.extras.stats.graphDatabase module

+
+
+class PAMI.extras.stats.graphDatabase.graphDatabase(iFile)[source]
+

Bases: object

+
+
+plotEdgeDistribution()[source]
+
+ +
+
+plotNodeDistribution()[source]
+
+ +
+
+printGraphDatabaseStatistics()[source]
+
+ +
+
+printIndividualGraphStats()[source]
+
+ +
+ +
+
+

PAMI.extras.stats.sequentialDatabase module

+
+
+class PAMI.extras.stats.sequentialDatabase.sequentialDatabase(inputFile: str, sep: str = '\t')[source]
+

Bases: object

+

SequentialDatabase is a class to get the stats of a sequential database, such as average, minimum, and maximum lengths.

+
+
Attributes:
+
+
param inputFile:
+

file : +input file path

+
+
param sep:
+

str +separator in file. Default is tab space.

+
+
+
+
Methods:
+
+
readDatabase():

read the sequential database from the input file and store the sequences and the size of each sequence and its subsequences.

+
+
getDatabaseSize(self):

get the size of database

+
+
getTotalNumberOfItems(self):

get the number of items in database.

+
+
getMinimumSequenceLength(self):

get the minimum sequence length

+
+
getAverageSubsequencePerSequenceLength(self):

get the average subsequence length per sequence length. It is sum of all subsequence length divided by sequence length.

+
+
getAverageItemPerSubsequenceLength(self):

get the average Item length per subsequence. It is sum of all item length divided by subsequence length.

+
+
getMaximumSequenceLength(self):

get the maximum sequence length

+
+
getStandardDeviationSubsequenceLength(self):

get the standard deviation subsequence length

+
+
getVarianceSequenceLength(self):

get the variance Sequence length

+
+
getSequenceSize(self):

get the size of sequence

+
+
getMinimumSubsequenceLength(self):

get the minimum subsequence length

+
+
getAverageItemPerSequenceLength(self):

get the average item length per sequence. It is sum of all item length divided by sequence length.

+
+
getMaximumSubsequenceLength(self):

get the maximum subsequence length

+
+
getStandardDeviationSubsequenceLength(self):

get the standard deviation subsequence length

+
+
getVarianceSubsequenceLength(self):

get the variance subSequence length

+
+
getSortedListOfItemFrequencies(self):

get sorted list of item frequencies

+
+
getFrequenciesInRange(self):

get sorted list of item frequencies in some range

+
+
getSequencialLengthDistribution(self):

get Sequence length Distribution

+
+
getSubsequencialLengthDistribution(self):

get subSequence length distribution

+
+
printStats(self):

to print the all status of sequence database

+
+
plotGraphs(self):

to plot the distribution about items, subsequences in sequence and items in subsequence

+
+
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.extras.stats import sequentialDatabase as db
+
+obj = db.sequentialDatabase(iFile, "        ")
+
+obj.save(oFile)
+
+obj.run()
+
+obj.printStats()
+
+
+
+
+

Executing the code on terminal:

+
Format:
+
+(.venv) $ python3 sequentialDatabase.py <inputFile>
+
+Example Usage:
+
+(.venv) $ python3 sequentialDatabase.py sampleDB.txt
+
+
+
+
+

Sample run of the importing code:

+
+

import PAMI.extras.stats.sequentialDatabase as alg
_ap = alg.sequentialDatabase(inputFile, sep)
_ap.readDatabase()
_ap.printStats()
_ap.plotGraphs()

+
+
+
+

Credits:

+
+

The complete program was written by Shota Suzuki under the supervision of Professor Rage Uday Kiran.

+
+
+
+getAverageItemPerSequenceLength() float[source]
+

get the average item length per sequence. It is sum of all item length divided by sequence length. +:return: average item length per sequence +:rtype: float

+
+ +
+
+getAverageItemPerSubsequenceLength() float[source]
+

get the average Item length per subsequence. It is sum of all item length divided by subsequence length. +:return: average Item length per subsequence +:rtype: float

+
+ +
+
+getAverageSubsequencePerSequenceLength() float[source]
+

get the average subsequence length per sequence length. It is sum of all subsequence length divided by sequence length. +:return: average subsequence length per sequence length +:rtype: float
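Illustratively, with each sequence stored as a list of subsequences (itemsets), the computation is a simple ratio; a sketch on made-up data:

# hypothetical sequential database: each sequence is a list of subsequences
sequences = [[["a", "b"], ["c"]], [["a"]], [["b"], ["c"], ["a", "c"]]]
avgSubsequences = sum(len(s) for s in sequences) / len(sequences)  # (2 + 1 + 3) / 3 = 2.0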

+
+ +
+
+getDatabaseSize() int[source]
+

get the size of database +:return: dataset size +:rtype: int

+
+ +
+
+getFrequenciesInRange() Dict[int, int][source]
+

get sorted list of item frequencies in some range +:return: item separated by its frequencies +:rtype: dict

+
+ +
+
+getMaximumSequenceLength() int[source]
+

get the maximum sequence length +:return: maximum sequence length +:rtype: int

+
+ +
+
+getMaximumSubsequenceLength() int[source]
+

get the maximum subsequence length +:return: maximum subsequence length +:rtype: int

+
+ +
+
+getMinimumSequenceLength() int[source]
+

get the minimum sequence length +:return: minimum sequence length +:rtype: int

+
+ +
+
+getMinimumSubsequenceLength() int[source]
+

get the minimum subsequence length +:return: minimum subsequence length +:rtype: int

+
+ +
+
+getSequenceSize() int[source]
+

get the size of sequence +:return: sequences size +:rtype: int

+
+ +
+
+getSequencialLengthDistribution() Dict[int, int][source]
+

get Sequence length Distribution +:return: Sequence length +:rtype: dict

+
+ +
+
+getSortedListOfItemFrequencies() Dict[str, int][source]
+

get sorted list of item frequencies +:return: item frequencies +:rtype: dict

+
+ +
+
+getStandardDeviationSequenceLength() float[source]
+

get the standard deviation sequence length +:return: standard deviation sequence length +:rtype: float

+
+ +
+
+getStandardDeviationSubsequenceLength() float[source]
+

get the standard deviation subsequence length +:return: standard deviation subsequence length +:rtype: float

+
+ +
+
+getSubsequencialLengthDistribution() Dict[int, int][source]
+

get subSequence length distribution +:return: subSequence length +:rtype: dict

+
+ +
+
+getTotalNumberOfItems() int[source]
+

get the number of items in database. +:return: number of items +:rtype: int

+
+ +
+
+getVarianceSequenceLength() float[source]
+

get the variance Sequence length +:return: variance Sequence length +:rtype: float

+
+ +
+
+getVarianceSubsequenceLength() float[source]
+

get the variance subSequence length +:return: variance subSequence length +:rtype: float

+
+ +
+
+plotGraphs() None[source]
+

To plot the distribution about items, subsequences in sequence and items in subsequence

+
+ +
+
+printStats() None[source]
+

To print the all status of sequence database

+
+ +
+
+readDatabase() None[source]
+

read the sequential database from the input file and store the sequences and the size of each sequence and its subsequences.

+
+ +
+
+run() None[source]
+
+ +
+
+ +
+
+

PAMI.extras.stats.temporalDatabase module

+
+
+class PAMI.extras.stats.temporalDatabase.temporalDatabase(inputFile: str | DataFrame, sep: str = '\t')[source]
+

Bases: object

+
+
Description:
+

TemporalDatabase is a class to get the stats of a temporal database.

+
+
Attributes:
+
+
:param inputFile: file

input file path

+
+
:param sep: str

separator in file. Default is tab space.

+
+
+
+
Methods:
+
+
run()

execute readDatabase function

+
+
readDatabase()

read database from input file

+
+
getDatabaseSize()

get the size of database

+
+
getMinimumTransactionLength()

get the minimum transaction length

+
+
getAverageTransactionLength()

get the average transaction length. It is sum of all transaction length divided by database length.

+
+
getMaximumTransactionLength()

get the maximum transaction length

+
+
getStandardDeviationTransactionLength()

get the standard deviation of transaction length

+
+
getSortedListOfItemFrequencies()

get sorted list of item frequencies

+
+
getSortedListOfTransactionLength()

get sorted list of transaction length

+
+
save(data, outputFile)

store data into outputFile

+
+
getMinimumPeriod()

get the minimum period

+
+
getAveragePeriod()

get the average period

+
+
getMaximumPeriod()

get the maximum period

+
+
getStandardDeviationPeriod()

get the standard deviation period

+
+
getNumberOfTransactionsPerTimestamp()

get number of transactions per time stamp. This time stamp range is 1 to max period.

+
+
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.extras.stats import temporalDatabase as db
+
+obj = db.temporalDatabase(iFile, "  ")
+
+obj.save(oFile)
+
+obj.run()
+
+obj.printStats()
+
+
+
+
+convertDataIntoMatrix() ndarray[source]
+
+ +
+
+getAverageInterArrivalPeriod() float[source]
+

get the average inter arrival period. It is sum of all period divided by number of period. +:return: average inter arrival period +:rtype: float

+
+ +
+
+getAveragePeriodOfItem() float[source]
+

get the average period of the item +:return: average period +:rtype: float

+
+ +
+
+getAverageTransactionLength() float[source]
+

get the average transaction length. It is sum of all transaction length divided by database length. +:return: average transaction length +:rtype: float

+
+ +
+
+getDatabaseSize() int[source]
+

get the size of database +:return: dataset size +:rtype: int

+
+ +
+
+getDensity() float[source]
+

get the density of the database. Density is the percentage of non-zero entries in the database (i.e., 1 - sparsity). +:return: database density +:rtype: float

+
+ +
+
+getFrequenciesInRange() Dict[int, int][source]
+
+ +
+
+getMaximumInterArrivalPeriod() int[source]
+

get the maximum inter arrival period +:return: maximum inter arrival period +:rtype: int

+
+ +
+
+getMaximumPeriodOfItem() int[source]
+

get the maximum period of the item +:return: maximum period +:rtype:int

+
+ +
+
+getMaximumTransactionLength() int[source]
+

get the maximum transaction length +:return: maximum transaction length +:rtype: int

+
+ +
+
+getMinimumInterArrivalPeriod() int[source]
+

get the minimum inter arrival period +:return: minimum inter arrival period +:rtype: int

+
+ +
+
+getMinimumPeriodOfItem() int[source]
+

get the minimum period of the item +:return: minimum period +:rtype: int

+
+ +
+
+getMinimumTransactionLength() int[source]
+

get the minimum transaction length +:return: minimum transaction length +:rtype: int

+
+ +
+
+getNumberOfTransactionsPerTimestamp() Dict[int, int][source]
+

get number of transactions per time stamp +:return: number of transactions per time stamp as dict +:rtype: dict
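This is effectively a frequency count over timestamps; a sketch with collections.Counter and made-up timestamps:

from collections import Counter

timestamps = [1, 1, 2, 5]            # hypothetical timestamps, one per transaction
perTimestamp = Counter(timestamps)   # Counter({1: 2, 2: 1, 5: 1})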

+
+ +
+
+getPeriodsInRange() Dict[int, int][source]
+
+ +
+
+getSortedListOfItemFrequencies() Dict[str, int][source]
+

get the sorted list of item frequencies

:return: item frequencies
:rtype: dict

+
+ +
+
+getSparsity() float[source]
+

get the sparsity of the database. Sparsity is the percentage of empty (zero) entries in the database.

:return: database sparsity
:rtype: float

+
+ +
+
+getStandardDeviationPeriod() float[source]
+

get the standard deviation of the periods

:return: standard deviation of the periods
:rtype: float

+
+ +
+
+getStandardDeviationTransactionLength() float[source]
+

get the standard deviation of the transaction lengths

:return: standard deviation of the transaction lengths
:rtype: float

+
+ +
+
+getTotalNumberOfItems() int[source]
+

get the number of items in the database

:return: number of items
:rtype: int

+
+ +
+
+getTransanctionalLengthDistribution() Dict[int, int][source]
+

get the transaction length distribution

:return: transaction length distribution
:rtype: dict

+
+ +
+
+getVarianceTransactionLength() float[source]
+

get the variance of the transaction lengths

:return: variance of the transaction lengths
:rtype: float

+
+ +
+
+plotGraphs() None[source]
+
+ +
+
+printStats() None[source]
+
+ +
+
+readDatabase() None[source]
+

read the database from the input file, storing the transactions and the size of each transaction, and store the periods between transactions as a list

+
+ +
+
+run() None[source]
+
+ +
+
+save(data: dict, outputFile: str) None[source]
+

store data into outputFile

:param data: input data
:type data: dict
:param outputFile: output file name or path to store
:type outputFile: str
:return: None

+
+ +
+
+ +
+
+

PAMI.extras.stats.utilityDatabase module

+
+
+class PAMI.extras.stats.utilityDatabase.utilityDatabase(inputFile: str | DataFrame, sep: str = '\t')[source]
+

Bases: object

+
+
Description:
+

UtilityDatabase is a class to get the statistics of a utility database.

+
+
Attributes:
+
+
param inputFile:
+

file : input file path

+
+
param sep:
+

str : separator in file. Default is tab space.

+
+
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.extras.stats import utilityDatabase as db

obj = db.utilityDatabase(iFile, "\t")

obj.run()

obj.printStats()

obj.save(oFile)
+
+
+
+
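The utility statistics below reduce to simple aggregates over the per-transaction total utilities. A minimal sketch, assuming PAMI-style utility lines of the form items : totalUtility : per-item utilities (the format and the sample data are assumptions for illustration):

# Each line: "a<TAB>b<TAB>c:12:4<TAB>5<TAB>3" (items : total utility : item utilities).
lines = ['a\tb\tc:12:4\t5\t3',
         'a\tc:7:4\t3',
         'b\tc:9:6\t3']

totals = [int(line.split(':')[1]) for line in lines]
print('total utility  :', sum(totals))               # 28
print('minimum utility:', min(totals))               # 7
print('average utility:', sum(totals) / len(totals))
print('maximum utility:', max(totals))               # 12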
+creatingItemSets() None[source]
+

Storing the complete transactions of the database/input file in a database variable

+
+ +
+
+getAverageTransactionLength() float[source]
+

get the average transaction length, i.e., the sum of all transaction lengths divided by the database length.

:return: average transaction length
:rtype: float

+
+ +
+
+getAverageUtility() float[source]
+

get the average utility

:return: average utility
:rtype: float

+
+ +
+
+getDatabaseSize() int[source]
+

get the size of the database

:return: size of the database
:rtype: int

+
+ +
+
+getFrequenciesInRange() dict[source]
+

This function is used to get the frequencies in a range

:return: frequencies in range
:rtype: dict

+
+ +
+
+getMaximumTransactionLength() int[source]
+

get the maximum transaction length

:return: maximum transaction length
:rtype: int

+
+ +
+
+getMaximumUtility() int[source]
+

get the maximum utility

:return: maximum utility
:rtype: int

+
+ +
+
+getMinimumTransactionLength() int[source]
+

get the minimum transaction length

:return: minimum transaction length
:rtype: int

+
+ +
+
+getMinimumUtility() int[source]
+

get the minimum utility

:return: minimum utility
:rtype: int

+
+ +
+
+getNumberOfItems() int[source]
+

get the number of items in the database

:return: number of items
:rtype: int

+
+ +
+
+getSortedListOfItemFrequencies() dict[source]
+

get the sorted list of item frequencies

:return: item frequencies
:rtype: dict

+
+ +
+
+getSortedUtilityValuesOfItem() dict[source]
+

get the sorted utility value of each item; the key is the item and the value is its utility

:return: sorted dictionary of item utility values
:rtype: dict

+
+ +
+
+getSparsity() float[source]
+

get the sparsity of the database

:return: sparsity of the database
:rtype: float

+
+ +
+
+getStandardDeviationTransactionLength() float[source]
+

get the standard deviation of the transaction lengths

:return: standard deviation of the transaction lengths
:rtype: float

+
+ +
+
+getTotalNumberOfItems() int[source]
+

get the number of items in the database

:return: number of items
:rtype: int

+
+ +
+
+getTotalUtility() int[source]
+

get the sum of utilities

:return: total utility
:rtype: int

+
+ +
+
+getTransanctionalLengthDistribution() dict[source]
+

get the transaction length distribution

:return: transaction length distribution
:rtype: dict

+
+ +
+
+getVarianceTransactionLength() float[source]
+

get the variance of the transaction lengths

:return: variance of the transaction lengths
:rtype: float

+
+ +
+
+plotGraphs() None[source]
+
+ +
+
+printStats() None[source]
+

This function is used to print the results

+
+ +
+
+readDatabase() None[source]
+

read the database from the input file and store the transactions and the size of each transaction.

+
+ +
+
+run() None[source]
+
+ +
+
+save(data, outputFile) None[source]
+

store data into outputFile

:param data: input data
:type data: dict
:param outputFile: output file name or path to store
:type outputFile: str
:return: None

+
+ +
+
+ +
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.extras.syntheticDataGenerator.html b/sphinx/_build/html/PAMI.extras.syntheticDataGenerator.html
new file mode 100644
index 000000000..0ff02a786
--- /dev/null
+++ b/sphinx/_build/html/PAMI.extras.syntheticDataGenerator.html

PAMI.extras.syntheticDataGenerator package — PAMI 2024.04.23 documentation
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.extras.syntheticDataGenerator package

+
+

Submodules

+
+
+

PAMI.extras.syntheticDataGenerator.TemporalDatabase module

+
+
+class PAMI.extras.syntheticDataGenerator.TemporalDatabase.TemporalDatabase(numOfTransactions: int, avgLenOfTransactions: int, numItems: int, outputFile: str, percentage: int = 50, sep: str = '\t', typeOfFile: str = 'Database')[source]
+

Bases: object

+
+
Description:
+
    +
  • creates a temporal database with the required parameters (e.g., numOfTransactions, avgLenOfTransactions, numItems, and outputFile).

  • +
  • the output can be written either to a text file or to a dataframe, depending on the input type.

  • +
+
+
Attributes:
+
+
param numOfTransactions:
+

int : number of transactions

+
+
param avgLenOfTransactions:
+

int : average length of transactions

+
+
param numItems:
+

int : number of items

+
+
param outputFile:
+

str : name of the output file

+
+
param percentage:
+

int : percentage of coinToss for the TID of the temporal database

+
+
param sep:
+

str : separator for the database output file

+
+
param typeOfFile:
+

str : specify database or dataframe to get the corresponding output

+
+
+
+
Methods:
+
+
getFileName():

returns filename

+
+
createTemporalFile():

creates temporal database file or dataframe

+
+
getDatabaseAsDataFrame:

returns dataframe

+
+
performCoinFlip():

Perform a coin flip with the given probability

+
+
tuning():

Tune the arrayLength to match avgLenOfTransactions

+
+
createTemporalFile():

create Temporal database or dataframe depending on input

+
+
+
+
+
+

Methods to execute code on terminal

+
Format:
+
+(.venv) $ python3 TemporalDatabase.py <numOfTransactions> <avgLenOfTransactions> <numItems> <outputFile>
+
+Example Usage:
+
+(.venv) $ python3 TemporalDatabase.py 50 10 100 temporal.txt
+
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.extras.syntheticDataGenerator import TemporalDatabase as db

temporalDB = db.TemporalDatabase(numOfTransactions, avgLenOfTransactions, numItems, outputFile, percentage)

temporalDB.create()
+
+
+
+
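The percentage parameter drives a coin flip (see performCoinFlip below) that decides whether the timestamp advances between transactions. A rough sketch of that idea, under the stated assumption about how the flip is used; this is an illustration, not PAMI's exact implementation:

import random

def performCoinFlip(probability: float) -> bool:
    # True with the given probability (0..1).
    return random.random() < probability

# The timestamp advances only when the flip succeeds, so several
# transactions may share a timestamp. All values are illustrative.
percentage = 50
timestamp, tids = 1, []
for _ in range(10):
    if performCoinFlip(percentage / 100):
        timestamp += 1
    tids.append(timestamp)
print(tids)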
+create() None[source]
+

create a temporal database or dataframe, depending on the type of file specified.

:return: None

+
+ +
+
+getDatabaseAsDataFrame() DataFrame[source]
+

This function returns the database in dataframe format.

+

:return: pd.DataFrame

+
+ +
+
+getFileName() str[source]
+

This function returns the name of the output file.

:return: outputFile

+
+ +
+
+performCoinFlip(probability: float) bool[source]
+

Perform a coin flip with the given probability.

+
+ +
+
+tuning(array, sumRes) list[source]
+

Tune the array so that the sum of the values is equal to sumRes

+

Parameters:

:param array: list of randomly generated values
:type array: list
:param sumRes: target sum
:type sumRes: int

+

Returns: array (list) – tuned array

+
+ +
+
+ +
+
+

PAMI.extras.syntheticDataGenerator.TransactionalDatabase module

+
+
+class PAMI.extras.syntheticDataGenerator.TransactionalDatabase.TransactionalDatabase(numLines, avgItemsPerLine, numItems)[source]
+

Bases: object

+
+
Description:
+

TransactionalDatabase is a collection of transactions. It only considers the data in transactions and ignores the metadata.

+
+
Attributes:
+
+
numLines: int

Number of lines

+
+
avgItemsPerLine: int

Average number of items per line

+
+
numItems: int

Total number of items

+
+
+
+
Methods:
+
+
create:

Generate the transactional database

+
+
save:

Save the transactional database to a user-specified file

+
+
getTransactions:

Get the transactional database

+
+
+
+
+
+

Methods to execute code on terminal

+
Format:
+
+(.venv) $ python3 TransactionalDatabase.py <numLines> <avgItemsPerLine> <numItems>
+
+Example Usage:
+
+(.venv) $ python3 TransactionalDatabase.py 50 10 100
+
+
+
+
+

Importing this algorithm into a python program

+
+

from PAMI.extras.syntheticDataGenerator import TransactionalDatabase as db

+

obj = db.TransactionalDatabase(10, 5, 10)

+

obj.create()

+

obj.save('db.txt')

+

print(obj.getTransactions())

+
+
+
+create() None[source]
+

Generate the transactional database with the given input parameters.

Returns: None

+
+ +
+
+generateArray(nums, avg, maxItems) list[source]
+

Generate a random array of nums values whose mean is avg

+
+
Parameters:
+
    +
  • nums (int) – number of values

  • +
  • avg (int) – average value

  • +
  • maxItems (int) – maximum value

  • +
+
+
+

Returns: values (list) – random array

+
+ +
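A rough sketch of the generateArray idea: draw candidate lengths at random, then nudge entries up or down until the total hits nums * avg, so the mean comes out exactly at avg (illustrative, not PAMI's exact code; assumes 1 <= avg <= maxItems):

import random

def generateArray(nums: int, avg: int, maxItems: int) -> list:
    values = [random.randint(1, maxItems) for _ in range(nums)]
    target = nums * avg
    # Tune: move one random entry a step toward the target sum per iteration.
    while sum(values) != target:
        i = random.randrange(nums)
        if sum(values) > target and values[i] > 1:
            values[i] -= 1
        elif sum(values) < target and values[i] < maxItems:
            values[i] += 1
    return values

lengths = generateArray(10, 5, 10)
print(lengths, sum(lengths) / len(lengths))   # mean is exactly 5.0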
+
+getTransactions() DataFrame[source]
+

Get the transactional database in dataFrame format

+

Returns: db (pd.DataFrame) – transactional database

+
+ +
+
+save(filename) None[source]
+

Save the transactional database to a file

+
+
Parameters:
+

filename (str) – name of the file

+
+
+
+ +
+
+tuning(array, sumRes) list[source]
+

Tune the array so that the sum of the values is equal to sumRes

+
+
Parameters:
+
    +
  • array (list) – list of values

  • +
  • sumRes (int) – target sum

  • +
+
+
+

Returns: array (list) – tuned array

+
+ +
+
+ +
+
+

PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialTemporal module

+
+
+class PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialTemporal.createGeoreferentialTemporalDatabase(transactions: int, items: int, avgTransaction: int)[source]
+

Bases: object

+

This class creates a synthetic geo-referential temporal database.

+
+
Attribute:
+
+
totalTransactions : int

Number of transactions

noOfItems : int or float

Number of items

avgTransactionLength : int

The average length of a transaction

+
+
outputFile: str

Name of the output file.

+
+
+
+
Methods:
+
+
createGeoreferentialTemporalDatabase(outputFile)

Create geo-referential temporal database and store into outputFile

+
+
+
+
+
+

Credits:

+
+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+
+createGeoreferentialTemporalDatabase(outputFile: str) None[source]
+

create a geo-referential temporal database and store it in outputFile

+
+
Parameters:
+

outputFile (str) – file name or path to store database

+
+
Returns:
+

outputFile name

+
+
+
+ +
+
+ +
+
+

PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialTransactions module

+
+
+class PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialTransactions.createSyntheticGeoreferentialTransaction(transactions, items, avgTransaction)[source]
+

Bases: object

+

This class creates a synthetic geo-referential transactional database.

+
+
Attribute:
+
+
totalTransactions : int

Number of transactions

items : int

Number of items

avgTransactionLength : int

The average length of a transaction

+
+
outputFile: str

Name of the output file.

+
+
+
+
Methods:
+
+
createGeoreferentialTransactionDatabase(outputFile)

Create geo-referential transactional database and store into outputFile

+
+
+
+
+
+

Credits:

+
+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+
+createGeoreferentialTransactionalDatabase(outputFile)[source]
+

create a geo-referential transactional database and store it in outputFile

+
+
Parameters:
+

outputFile (str) – file name or path to store database

+
+
Returns:
+

outputFile name

+
+
+
+ +
+
+ +
+
+

PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialUncertainTransaction module

+
+
+class PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialUncertainTransaction.createSyntheticGeoreferentialUncertainTransaction(transactions: int, items: int, avgTransaction: int)[source]
+

Bases: object

+

This class creates a synthetic geo-referential uncertain transactional database.

+
+
Attribute:
+
+
totalTransactions : int

Number of transactions

noOfItems : int

Number of items

avgTransactionLength : int

The average length of a transaction

+
+
outputFile: str

Name of the output file.

+
+
+
+
Methods:
+
+
createGeoreferentialuncertainTransactionDatabase(outputFile)

Create a geo-referential uncertain transactional database and store it into outputFile

+
+
+
+
+
+

Credits:

+
+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+
+createGeoreferentialUncertainTransactionalDatabase(outputFile: str) None[source]
+

create a geo-referential uncertain transactional database and store it in outputFile

+
+
Parameters:
+

outputFile (str) – file name or path to store database

+
+
Returns:
+

outputFile name

+
+
+
+ +
+
+ +
+
+

PAMI.extras.syntheticDataGenerator.createSyntheticTemporal module

+
+
+class PAMI.extras.syntheticDataGenerator.createSyntheticTemporal.createSyntheticTemporal(transactions: int, items: int, avgTransaction: int)[source]
+

Bases: object

+

This class creates a synthetic temporal database.

+
+
Attribute:
+
+
totalTransactions : int

Number of transactions

noOfItems : int

Number of items

avgTransactionLength : int

The average length of a transaction

+
+
outputFile: str

Name of the output file.

+
+
+
+
Methods:
+
+
createTemporalDatabase(outputFile)

Create temporal database from DataFrame and store into outputFile

+
+
+
+
+
+

Credits:

+
+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+
+createTemporalDatabase(outputFile: str) None[source]
+

create a temporal database and store it in outputFile

+
+
Parameters:
+

outputFile (str) – file name or path to store database

+
+
Returns:
+

outputFile name

+
+
+
+ +
+
+ +
+
+

PAMI.extras.syntheticDataGenerator.createSyntheticTransactions module

+
+
+class PAMI.extras.syntheticDataGenerator.createSyntheticTransactions.createSyntheticTransaction(totalTransactions: int, items: int, avgTransactionLength: int)[source]
+

Bases: object

+

This class creates a synthetic transactional database.

+
+
Attribute:
+
+
totalTransactions : int

Number of transactions

noOfItems : int

Number of items

avgTransactionLength : int

The average length of a transaction

+
+
outputFile: str

Name of the output file.

+
+
+
+
Methods:
+
+
createTransactionalDatabase(outputFile)

Create transactional database and store into outputFile

+
+
+
+
+
+

Credits:

+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+createTransactionalDatabase(outputFile: str) None[source]
+

create a transactional database and store it in outputFile

+
+
Parameters:
+

outputFile (str) – file name or path to store database

+
+
Returns:
+

outputFile name

+
+
+
+ +
+
+ +
+
+

PAMI.extras.syntheticDataGenerator.createSyntheticUncertainTemporal module

+
+
+class PAMI.extras.syntheticDataGenerator.createSyntheticUncertainTemporal.createSyntheticUncertainTemporal(totalTransactions: int, items: int, avgTransaction: int)[source]
+

Bases: object

+

This class creates a synthetic uncertain temporal database.

+
+
Attribute:
+
+
totalTransactions : int

Total number of transactions

noOfItems : int

Number of items

avgTransactionLength : int

The average length of a transaction

+
+
outputFile: str

Name of the output file.

+
+
+
+
Methods:
+
+
createUncertainTemporalDatabase(outputFile)

Create temporal database from DataFrame and store into outputFile

+
+
+
+
+
+

Credits:

+
+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+
+createUncertainTemporalDatabase(outputFile: str) None[source]
+

create an uncertain temporal database and store it in outputFile

+
+
Parameters:
+

outputFile (str) – file name or path to store database

+
+
Returns:
+

outputFile name

+
+
+
+ +
+
+ +
+
+

PAMI.extras.syntheticDataGenerator.createSyntheticUncertainTransactions module

+
+
+class PAMI.extras.syntheticDataGenerator.createSyntheticUncertainTransactions.createSyntheticUncertainTransaction(transactions: int, items: int, avgTransaction: int)[source]
+

Bases: object

+

This class creates a synthetic uncertain transactional database.

+
+
Attribute:
+
+
totalTransactions : int

Number of transactions

noOfItems : int

Number of items

avgTransactionLength : int

The average length of a transaction

+
+
outputFile: str

Name of the output file.

+
+
+
+
Methods:
+
+
createUncertainTransactionalDatabase(outputFile)

Create uncertain transactional database and store into outputFile

+
+
+
+
+
+

Credits:

+
+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+
+createUncertainTransactionalDatabase(outputFile: str) None[source]
+

create an uncertain transactional database and store it in outputFile

+
+
Parameters:
+

outputFile (str) – file name or path to store database

+
+
Returns:
+

outputFile name

+
+
+
+ +
+
+ +
+
+

PAMI.extras.syntheticDataGenerator.createSyntheticUtility module

+
+
+class PAMI.extras.syntheticDataGenerator.createSyntheticUtility.createSyntheticUtility(transactions: int, items: int, maxUtilRange: int, avgTransaction: int)[source]
+

Bases: object

+

This class creates a synthetic utility database.

+
+
Attribute:
+
+
totalTransactions : int

Number of transactions

noOfItems : int

Number of items

maxUtilRange : int

Maximum utility range

avgTransactionLength : int

The average length of a transaction

+
+
outputFile: str

Name of the output file.

+
+
+
+
Methods:
+
+
createUtilityDatabase(outputFile)

Create utility database from DataFrame and store into outputFile

+
+
+
+
+
+

Credits:

+
+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+
+createUtilityDatabase(outputFile: str) None[source]
+

create a utility database and store it in outputFile

+
+
Parameters:
+

outputFile (str) – file name or path to store database

+
+
Returns:
+

outputFile name

+
+
+
+ +
+
+ +
+
+

PAMI.extras.syntheticDataGenerator.fuzzyDatabase module

+
+
+

PAMI.extras.syntheticDataGenerator.generateTemporal module

+
+
+class PAMI.extras.syntheticDataGenerator.generateTemporal.generateTemporal(transactionSize: int, numOfItems: int, avgTransactionLength: int)[source]
+

Bases: object

+
+
+generate() None[source]
+
+ +
+
+save(outputFile: str, sep='\t') None[source]
+
+ +
+ +
+
+

PAMI.extras.syntheticDataGenerator.generateTransactional module

+
+
+class PAMI.extras.syntheticDataGenerator.generateTransactional.generateTransactional(transactionSize: int, numOfItems: int, avgTransactionLength: int)[source]
+

Bases: object

+
+
+generate() None[source]
+
+ +
+
+save(outputFile: str, sep='\t') None[source]
+
+ +
+ +
+
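The generate* classes in this package expose only a constructor, generate(), and save(). A minimal usage sketch for generateTransactional, assuming the constructor signature shown above (the output file name is illustrative):

from PAMI.extras.syntheticDataGenerator import generateTransactional as gt

# 100 transactions over 50 items with an average length of 10.
obj = gt.generateTransactional(100, 50, 10)
obj.generate()
obj.save('transactional.txt')   # hypothetical output path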
+

PAMI.extras.syntheticDataGenerator.generateUncertainTemporal module

+
+
+class PAMI.extras.syntheticDataGenerator.generateUncertainTemporal.generateUncertainTemporal(transactionSize: int, numOfItems: int, avgTransactionLength: int, significant=2)[source]
+

Bases: object

+
+
+generate() None[source]
+
+ +
+
+save(outputFile: str, sep='\t') None[source]
+
+ +
+ +
+
+

PAMI.extras.syntheticDataGenerator.generateUncertainTransactional module

+
+
+class PAMI.extras.syntheticDataGenerator.generateUncertainTransactional.generateUncertainTransactional(transactionSize: int, numOfItems: int, avgTransactionLength: int, significant=2)[source]
+

Bases: object

+
+
+generate() None[source]
+
+ +
+
+save(outputFile: str, sep='\t') None[source]
+
+ +
+ +
+
+

PAMI.extras.syntheticDataGenerator.generateUtilityTemporal module

+
+
+class PAMI.extras.syntheticDataGenerator.generateUtilityTemporal.generateUtilityTemporal(transactionSize: int, numOfItems: int, avgTransactionLength: int, minUtilityValue: int, maxUtilityValue: int, minNumOfTimesAnItem: int, maxNumOfTimesAnItem: int)[source]
+

Bases: object

+
+
+generate() None[source]
+
+ +
+
+save(outputFile: str, sep='\t', type='utility') None[source]
+
+ +
+ +
+
+

PAMI.extras.syntheticDataGenerator.generateUtilityTransactional module

+
+
+class PAMI.extras.syntheticDataGenerator.generateUtilityTransactional.generateUtilityTransactional(transactionSize: int, numOfItems: int, avgTransactionLength: int, minUtilityValue: int, maxUtilityValue: int, minNumOfTimesAnItem: int, maxNumOfTimesAnItem: int)[source]
+

Bases: object

+
+
+generate() None[source]
+
+ +
+
+save(outputFile: str, sep='\t', type='utility') None[source]
+
+ +
+ +
+
+

PAMI.extras.syntheticDataGenerator.georeferencedTemporalDatabase module

+
+
+

PAMI.extras.syntheticDataGenerator.georeferencedTransactionalDatabase module

+
+
+

PAMI.extras.syntheticDataGenerator.syntheticUtilityDatabase module

+
+
+class PAMI.extras.syntheticDataGenerator.syntheticUtilityDatabase.syntheticUtilityDatabase(totalTransactions: int, numOfItems: int, maxUtilRange: int, avgTransactionLength: int)[source]
+

Bases: object

+

This class creates a synthetic utility database.

+
+
+totalTransactions
+

Number of transactions.

+
+
Type:
+

int

+
+
+
+ +
+
+numOfItems
+

Number of items.

+
+
Type:
+

int

+
+
+
+ +
+
+maxUtilRange
+

Maximum utility range.

+
+
Type:
+

int

+
+
+
+ +
+
+avgTransactionLength
+

The length of average transaction.

+
+
Type:
+

int

+
+
+
+ +
+
+__init__(totalTransactions, numOfItems, maxUtilRange, avgTransactionLength)[source]
+

Constructor to initialize the database parameters.

+
+ +
+
+createSyntheticUtilityDatabase(outputFile)[source]
+

Create utility database and store it in the specified output file.

+
+ +
+
+createRandomNumbers(n, targetSum)[source]
+

Generate a list of random numbers with a specified target sum.

+
+ +
+
+save(outputFile)[source]
+

Save the generated utility database to a CSV file.

+
+ +
+
Credits:

The complete program was written by A.Hemanth sree sai under the supervision of Professor Rage Uday Kiran.

+
+
+
+
+createRandomNumbers(n: int, targetSum: int) list[float][source]
+

Generate a list of random numbers with a specified target sum.

+
+
Parameters:
+
    +
  • n (int) – Number of random numbers to generate.

  • +
  • targetSum (int) – Target sum for the generated random numbers.

  • +
+
+
Returns:
+

List of generated random numbers normalized and multiplied by the target sum.

+
+
Return type:
+

list

+
+
+
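The normalize-and-scale idea behind createRandomNumbers is simple: draw n random values, divide each by their sum, and multiply by the target. A minimal sketch (illustrative; PAMI may round or post-process differently):

import random

def createRandomNumbers(n: int, targetSum: int) -> list:
    values = [random.random() for _ in range(n)]
    total = sum(values)
    # Normalize to sum 1, then scale so the list sums to targetSum.
    return [v / total * targetSum for v in values]

utils = createRandomNumbers(5, 100)
print(utils, sum(utils))   # sums to (approximately) 100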
+ +
+
+createSyntheticUtilityDatabase(outputFile: str) None[source]
+

Create utility database and store it in the specified output file.

+
+
Parameters:
+

outputFile (str) – File name or path to store the database.

+
+
+
+ +
+
+save(outputFile: str) None[source]
+

Save the generated utility database to a CSV file.

+
+
Parameters:
+

outputFile (str) – File name or path to store the CSV file.

+
+
+
+ +
+ +
+
+

PAMI.extras.syntheticDataGenerator.temporalDatabaseGen module

+
+
+class PAMI.extras.syntheticDataGenerator.temporalDatabaseGen.CreateSyntheticTemporal(total_transactions: int, num_of_items: int, avg_transaction_length: int)[source]
+

Bases: object

+

This class creates a synthetic temporal database.

+
+
+total_transactions
+

Number of transactions.

+
+
Type:
+

int

+
+
+
+ +
+
+num_of_items
+

Number of items.

+
+
Type:
+

int

+
+
+
+ +
+
+avg_transaction_length
+

The length of average transaction.

+
+
Type:
+

int

+
+
+
+ +
+
+create_temporal_database(output_file)[source]
+

Create temporal database and store it in the specified output file.

+
+ +
+
Credits:

The complete program was written by A.Hemanth sree sai under the supervision of Professor Rage Uday Kiran.

+
+
+
+
+create_temporal_database(output_file: str) None[source]
+

Create temporal database and store it in the specified output file.

+
+
Parameters:
+

output_file (str) – File name or path to store the database.

+
+
+
+ +
+
+generate_random_numbers(n: int, target_sum: int) list[float][source]
+

Generate a list of random numbers with a specified target sum.

+
+
Parameters:
+
    +
  • n (int) – Number of random numbers to generate.

  • +
  • target_sum (int) – Target sum for the generated random numbers.

  • +
+
+
Returns:
+

List of generated random numbers normalized and multiplied by the target sum.

+
+
Return type:
+

list

+
+
+
+ +
+ +
+
+

PAMI.extras.syntheticDataGenerator.utilityDatabase module

+
+
+class PAMI.extras.syntheticDataGenerator.utilityDatabase.UtilityDataGenerator(databaseSize, numberOfItems, averageLengthOfTransaction, minimumInternalUtilityValue, maximumInternalUtilityValue, minimumExternalUtilityValue, maximumExternalUtilityValue)[source]
+

Bases: object

+
+
+Generate()[source]
+
+ +
+
+GenerateAndPrintItemPairs()[source]
+
+ +
+
+GenerateExternalUtilityData()[source]
+
+ +
+
+GetExternalUtilityData()[source]
+
+ +
+
+GetInternalUtilityData()[source]
+
+ +
+
+GetUtilityData()[source]
+
+ +
+
+Save(fileName)[source]
+
+ +
+
+SaveItemsInternalUtilityValues(fileName)[source]
+
+ +
+
+Saveitemsexternalutilityvalues(fileName)[source]
+
+ +
+ +
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.extras.visualize.html b/sphinx/_build/html/PAMI.extras.visualize.html
new file mode 100644
index 000000000..95c00d887
--- /dev/null
+++ b/sphinx/_build/html/PAMI.extras.visualize.html

PAMI.extras.visualize package — PAMI 2024.04.23 documentation
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.extras.visualize package

+
+

Submodules

+
+
+

PAMI.extras.visualize.graphs module

+
+
+class PAMI.extras.visualize.graphs.graphDatabase(iFile)[source]
+

Bases: object

+
+
+plot()[source]
+
+ +
+ +
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.faultTolerantFrequentPattern.basic.html b/sphinx/_build/html/PAMI.faultTolerantFrequentPattern.basic.html
new file mode 100644
index 000000000..f9921e52d
--- /dev/null
+++ b/sphinx/_build/html/PAMI.faultTolerantFrequentPattern.basic.html

PAMI.faultTolerantFrequentPattern.basic package — PAMI 2024.04.23 documentation
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.faultTolerantFrequentPattern.basic package

+
+

Submodules

+
+
+

PAMI.faultTolerantFrequentPattern.basic.FTApriori module

+
+
+class PAMI.faultTolerantFrequentPattern.basic.FTApriori.FTApriori(iFile, minSup, itemSup, minLength, faultTolerance, sep='\t')[source]
+

Bases: _faultTolerantFrequentPatterns

+
+
Description:
+

FT-Apriori is one of the fundamental algorithms to discover fault-tolerant frequent patterns in a transactional database. This program employs the apriori property (or downward closure property) to reduce the search space effectively.

+
+
Reference:
+

Pei, Jian & Tung, Anthony & Han, Jiawei. (2001). Fault-Tolerant Frequent Pattern Mining: Problems and Challenges.

+
+
Parameters:
+
    +
  • iFile – str : Name of the input file to mine the complete set of fault-tolerant frequent patterns

  • +
  • oFile – str : Name of the output file to store the complete set of fault-tolerant frequent patterns

  • +
  • minSup – float or int or str : The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup as expressed in count. Otherwise, it will be treated as float. Example: minSup=10 will be treated as integer, while minSup=10.0 will be treated as float

  • +
  • itemSup – int or float : +Frequency of an item

  • +
  • minLength – int : +minimum length of a pattern

  • +
  • faultTolerance – int

  • +
  • sep – str : This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator.

  • +
+
+
Attributes:
+
+
startTime : float

To record the start time of the mining process

endTime : float

To record the completion time of the mining process

finalPatterns : dict

Storing the complete set of patterns in a dictionary variable

memoryUSS : float

To store the total amount of USS memory consumed by the program

memoryRSS : float

To store the total amount of RSS memory consumed by the program

Database : list

To store the transactions of a database in list

+
+
+
+
+
+

Methods to execute code on terminal

+
Format:
+
+(.venv) $ python3 FTApriori.py <inputFile> <outputFile> <minSup> <itemSup> <minLength> <faultTolerance>
+
+Example Usage:
+
+(.venv) $ python3 FTApriori.py sampleDB.txt patterns.txt 10.0 3.0 3 1
+
+
+
+

Note

+

minSup will be considered as a count if it is specified as an integer, and as a proportion of the database transactions otherwise

+
+
+
+

Importing this algorithm into a python program

+
from PAMI.faultTolerantFrequentPattern.basic import FTApriori as alg
+
+obj = alg.FTApriori(inputFile,minSup,itemSup,minLength,faultTolerance)
+
+obj.mine()
+
+patterns = obj.getPatterns()
+
+print("Total number of fault-tolerant frequent patterns:",  len(patterns))
+
+obj.save("outputFile")
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:",  memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS",  memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:",  run)
+
+
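To make the snippet above concrete, here is a self-contained run on a tiny hand-written database; the data, file names, and parameter values are illustrative only and assume the constructor signature shown above:

from PAMI.faultTolerantFrequentPattern.basic import FTApriori as alg

# Write a tiny tab-separated transactional database (illustrative data).
with open('sampleDB.txt', 'w') as f:
    f.write('a\tb\tc\n'
            'a\tb\td\n'
            'a\tc\td\n'
            'b\tc\td\n')

obj = alg.FTApriori('sampleDB.txt', minSup=2, itemSup=2, minLength=2, faultTolerance=1)
obj.mine()
print('Total number of fault-tolerant frequent patterns:', len(obj.getPatterns()))
obj.save('patterns.txt')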
+
+
+

Credits:

+
+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns() Dict[Tuple[str, ...], int][source]
+

Function to send the set of frequent patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame() DataFrame[source]
+

Storing final frequent patterns in a dataframe

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine() None[source]
+

Fault-tolerant frequent pattern mining process will start from here

+
+ +
+
+printResults() None[source]
+

This function is used to print the result

+
+ +
+
+save(outFile) None[source]
+

Complete set of frequent patterns will be loaded in to an output file

+
+
Parameters:
+

outFile (csvfile) – name of the output file

+
+
Returns:
+

None

+
+
+
+ +
+
+startMine() None[source]
+

Fault-tolerant frequent pattern mining process will start from here

+
+ +
+
+ +
+
+

PAMI.faultTolerantFrequentPattern.basic.FTFPGrowth module

+
+
+class PAMI.faultTolerantFrequentPattern.basic.FTFPGrowth.FTFPGrowth(iFile: str | DataFrame, minSup: int | float | str, itemSup: float, minLength: int, faultTolerance: int, sep: str = '\t')[source]
+

Bases: _faultTolerantFrequentPatterns

+
+
Description:
+

FT-FPGrowth is one of the fundamental algorithms to discover fault-tolerant frequent patterns in a transactional database. It stores the database in a compressed FP-tree, decreasing the memory usage, and extracts the patterns from the tree. It employs the downward closure property to reduce the search space effectively.

+
+
Reference:
+

Han, J., Pei, J., Yin, Y. et al. Mining Frequent Patterns without Candidate Generation: A Frequent-Pattern +Tree Approach. Data Mining and Knowledge Discovery 8, 53–87 (2004). https://doi.org/10.1023

+
+
Parameters:
+
    +
  • iFile – file : Name of the input file to mine the complete set of fault-tolerant frequent patterns

  • +
  • oFile – str : Name of the output file to store the complete set of fault-tolerant frequent patterns

  • +
  • minSup – float or int or str : The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup as expressed in count. Otherwise, it will be treated as float. Example: minSup=10 will be treated as integer, while minSup=10.0 will be treated as float

  • +
+
+
+
+
:param sep: str :

This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator.

+
+
+
+
Attributes:
+
+
startTime: float :

To record the start time of the mining process

+
+
endTime: float :

To record the completion time of the mining process

+
+
memoryUSS : float

To store the total amount of USS memory consumed by the program

memoryRSS : float

To store the total amount of RSS memory consumed by the program

Database : list

To store the transactions of a database in list

mapSupport : Dictionary

To maintain the information of items and their frequency

lno : int

it represents the total number of transactions

tree : class

it represents the Tree class

finalPatterns : dict

it represents to store the patterns

+
+
+
+
Methods:
+
+
mine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of frequent patterns will be loaded in to an output file

+
+
getPatternsAsDataFrame()

Complete set of frequent patterns will be loaded in to a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
creatingItemSets()

Scans the dataset or dataframes and stores in list format

+
+
frequentOneItem()

Extracts the one-frequent patterns from transactions

+
+
+
+
+
+

Executing the code on terminal:

+
Format:
+
+(.venv) $ python3 FPGrowth.py <inputFile> <outputFile> <minSup>
+
+Example Usage:
+
+(.venv) $ python3 FPGrowth.py sampleDB.txt patterns.txt 10.0
+
+
+
+

Note

+

minSup will be considered as a count if it is specified as an integer, and as a proportion of the database transactions otherwise

+
+
+
+

Sample run of the importing code:

+
from PAMI.faultTolerantFrequentPattern.basic import FTFPGrowth as alg
+
+obj = alg.FTFPGrowth(inputFile,minSup,itemSup,minLength,faultTolerance)
+
+obj.mine()
+
+patterns = obj.getPatterns()
+
+print("Total number of Frequent Patterns:", len(patterns))
+
+obj.save(oFile)
+
+Df = obj.getPatternsAsDataFrame()
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+

Credits:

+
+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns() Dict[str, int][source]
+

Function to send the set of frequent patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame() DataFrame[source]
+

Storing final frequent patterns in a dataframe

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine() None[source]
+

Main program to start the operation

+
+ +
+
+printResults() None[source]
+

This function is used to print the results

+
+ +
+
+save(outFile: str) None[source]
+

Complete set of frequent patterns will be loaded in to an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
Returns:
+

None

+
+
+
+ +
+
+startMine() None[source]
+

Main program to start the operation

+
+ +
+
+ +
+
+

PAMI.faultTolerantFrequentPattern.basic.abstract module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.faultTolerantFrequentPattern.html b/sphinx/_build/html/PAMI.faultTolerantFrequentPattern.html
new file mode 100644
index 000000000..2fce07224
--- /dev/null
+++ b/sphinx/_build/html/PAMI.faultTolerantFrequentPattern.html

PAMI.faultTolerantFrequentPattern package — PAMI 2024.04.23 documentation
+ + +
+ + +
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.frequentPattern.basic.html b/sphinx/_build/html/PAMI.frequentPattern.basic.html
new file mode 100644
index 000000000..561822ac1
--- /dev/null
+++ b/sphinx/_build/html/PAMI.frequentPattern.basic.html

PAMI.frequentPattern.basic package — PAMI 2024.04.23 documentation
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.frequentPattern.basic package

+
+

Submodules

+
+
+

PAMI.frequentPattern.basic.Apriori module

+
+
+class PAMI.frequentPattern.basic.Apriori.Apriori(iFile, minSup, sep='\t')[source]
+

Bases: _frequentPatterns

+
+
Description:
+

Apriori is one of the fundamental algorithms to discover frequent patterns in a transactional database. This program employs the apriori property (or downward closure property) to reduce the search space effectively. This algorithm uses a breadth-first search technique to find the complete set of frequent patterns in a transactional database.

+
+
Reference:
+

Agrawal, R., Imieli ́nski, T., Swami, A.: Mining association rules between sets of items in large databases. +In: SIGMOD. pp. 207–216 (1993), https://doi.org/10.1145/170035.170072

+
+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine complete set of frequent patterns

  • +
  • oFile – str : +Name of the output file to store complete set of frequent patterns

  • +
  • minSup – int or float or str : The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup as expressed in count. Otherwise, it will be treated as float.

  • +
  • sep – str : This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator.

  • +
+
+
Attributes:
+
+
startTime : float

To record the start time of the mining process

endTime : float

To record the completion time of the mining process

finalPatterns : dict

Storing the complete set of patterns in a dictionary variable

memoryUSS : float

To store the total amount of USS memory consumed by the program

memoryRSS : float

To store the total amount of RSS memory consumed by the program

Database : list

To store the transactions of a database in list

+
+
+
+
+
+

Methods to execute code on terminal

+
Format:
+
+(.venv) $ python3 Apriori.py <inputFile> <outputFile> <minSup>
+
+Example Usage:
+
+(.venv) $ python3 Apriori.py sampleDB.txt patterns.txt 10.0
+
+
+
+

Note

+

minSup will be considered in percentage of database transactions

+
+
+
+

Importing this algorithm into a python program

+
import PAMI.frequentPattern.basic.Apriori as alg
+
+obj = alg.Apriori(iFile, minSup)
+
+obj.mine()
+
+frequentPatterns = obj.getPatterns()
+
+print("Total number of Frequent Patterns:", len(frequentPatterns))
+
+obj.save(oFile)
+
+Df = obj.getPatternsAsDataFrame()
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
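The breadth-first search mentioned in the description repeatedly joins frequent k-itemsets into (k+1)-candidates and prunes with the downward closure property: every subset of a frequent itemset must itself be frequent. A compact, self-contained sketch of that level-wise loop (an illustration of the principle, not PAMI's implementation):

from itertools import combinations

def apriori(database, minSup):
    # Level 1: count single items.
    counts = {}
    for txn in database:
        for item in txn:
            key = frozenset([item])
            counts[key] = counts.get(key, 0) + 1
    frequent = {s: c for s, c in counts.items() if c >= minSup}
    result, k = dict(frequent), 2
    while frequent:
        items = sorted({i for s in frequent for i in s})
        # Downward closure: keep a candidate only if all (k-1)-subsets are frequent.
        candidates = [frozenset(c) for c in combinations(items, k)
                      if all(frozenset(sub) in frequent
                             for sub in combinations(c, k - 1))]
        counts = {c: sum(1 for t in database if c <= t) for c in candidates}
        frequent = {c: n for c, n in counts.items() if n >= minSup}
        result.update(frequent)
        k += 1
    return result

db = [{'a', 'b', 'c'}, {'a', 'b'}, {'a', 'c'}, {'b', 'c'}]
print(apriori(db, minSup=2))   # all single items and all pairs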
+
+

Credits:

+
+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns() Dict[str, int][source]
+

Function to send the set of frequent patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame() DataFrame[source]
+

Storing final frequent patterns in a dataframe

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine() None[source]
+

Frequent pattern mining process will start from here

+
+ +
+
+printResults() None[source]
+

This function is used to print the result

+
+ +
+
+save(outFile) None[source]
+

Complete set of frequent patterns will be loaded in to an output file

+
+
Parameters:
+

outFile (csvfile) – name of the output file

+
+
Returns:
+

None

+
+
+
+ +
+
+startMine() None[source]
+

Frequent pattern mining process will start from here

+
+ +
+
+ +
+
+

PAMI.frequentPattern.basic.ECLAT module

+
+
+class PAMI.frequentPattern.basic.ECLAT.ECLAT(iFile, minSup, sep='\t')[source]
+

Bases: _frequentPatterns

+
+
Description:
+

ECLAT is one of the fundamental algorithms to discover frequent patterns in a transactional database.

+
+
Reference:
+

Mohammed Javeed Zaki: Scalable Algorithms for Association Mining. IEEE Trans. Knowl. Data Eng. 12(3): +372-390 (2000), https://ieeexplore.ieee.org/document/846291

+
+
Parameters:
+
    +
  • iFile – str : Name of the input file to mine the complete set of frequent patterns

  • +
  • oFile – str : +Name of the output file to store complete set of frequent patterns

  • +
  • minSup – int or float or str : The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup as expressed in count.

  • +
  • sep – str : This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator.

  • +
+
+
Attributes:
+
+
startTime : float

To record the start time of the mining process

endTime : float

To record the completion time of the mining process

finalPatterns : dict

Storing the complete set of patterns in a dictionary variable

memoryUSS : float

To store the total amount of USS memory consumed by the program

memoryRSS : float

To store the total amount of RSS memory consumed by the program

Database : list

To store the transactions of a database in list

+
+
+
+
+
+

Methods to execute code on terminal

+
Format:
+
+(.venv) $ python3 ECLAT.py <inputFile> <outputFile> <minSup>
+
+Example Usage:
+
+(.venv) $ python3 ECLAT.py sampleDB.txt patterns.txt 10.0
+
+
+
+

Note

+

minSup will be considered in percentage of database transactions

+
+
+
+

Importing this algorithm into a python program

+
import PAMI.frequentPattern.basic.ECLAT as alg
+
+obj = alg.ECLAT(iFile, minSup)
+
+obj.mine()
+
+frequentPatterns = obj.getPatterns()
+
+print("Total number of Frequent Patterns:", len(frequentPatterns))
+
+obj.save(oFile)
+
+Df = obj.getPatternsAsDataFrame()
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
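ECLAT works on the vertical database layout: each item maps to its tidset, the set of transaction ids containing it, and the support of an itemset is the size of the intersection of its items' tidsets. A minimal sketch of that idea (illustrative, not PAMI's code):

def verticalFormat(database):
    tidsets = {}   # item -> set of transaction ids
    for tid, txn in enumerate(database):
        for item in txn:
            tidsets.setdefault(item, set()).add(tid)
    return tidsets

db = [['a', 'b', 'c'], ['a', 'b'], ['a', 'c'], ['b', 'c']]
tidsets = verticalFormat(db)
# support({a, b}) = |tidset(a) & tidset(b)|
print(len(tidsets['a'] & tidsets['b']))   # 2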
+
+

Credits:

+
+

The complete program was written by Kundai under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns() dict[source]
+

Function to send the set of frequent patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame() DataFrame[source]
+

Storing final frequent patterns in a dataframe

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine() None[source]
+

Frequent pattern mining process will start from here

+
+ +
+
+printResults() None[source]
+

Function used to print the results

+
+ +
+
+save(outFile: str) None[source]
+

Complete set of frequent patterns will be loaded in to an output file

+
+
Parameters:
+

outFile (csvfile) – name of the output file

+
+
Returns:
+

None

+
+
+
+ +
+
+startMine() None[source]
+

Frequent pattern mining process will start from here

+
+ +
+
+ +
+
+

PAMI.frequentPattern.basic.ECLATDiffset module

+
+
+class PAMI.frequentPattern.basic.ECLATDiffset.ECLATDiffset(iFile, minSup, sep='\t')[source]
+

Bases: _frequentPatterns

+
+
Description:
+

ECLATDiffset uses diffsets to extract the frequent patterns from a transactional database.

+
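A diffset stores, for an extension PX of a prefix P, the transaction ids of P that are missing from PX, so support falls out by subtraction: support(PX) = support(P) - |diffset(PX)|. On dense databases these stored sets stay much smaller than tidsets. A minimal sketch of the bookkeeping (illustrative only):

tidsetA = {0, 1, 2}      # transactions containing the prefix P = {a}
tidsetAB = {0, 1}        # transactions containing PX = {a, b}

diffsetAB = tidsetA - tidsetAB                 # ids where b drops out of P
supportAB = len(tidsetA) - len(diffsetAB)      # 3 - 1 = 2
print(diffsetAB, supportAB)                    # {2} 2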
+
Reference:
+

KDD ‘03: Proceedings of the ninth ACM SIGKDD international conference on Knowledge discovery and data mining +August 2003 Pages 326–335 https://doi.org/10.1145/956750.956788

+
+
Parameters:
+
    +
  • iFile – str : Name of the input file to mine the complete set of frequent patterns

  • +
  • oFile – str : +Name of the output file to store complete set of frequent patterns

  • +
  • minSup – int or float or str : The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup as expressed in count.

  • +
  • sep – str : This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator.

  • +
+
+
Attributes:
+
+
startTime : float

To record the start time of the mining process

endTime : float

To record the completion time of the mining process

finalPatterns : dict

Storing the complete set of patterns in a dictionary variable

memoryUSS : float

To store the total amount of USS memory consumed by the program

memoryRSS : float

To store the total amount of RSS memory consumed by the program

Database : list

To store the transactions of a database in list

+
+
+
+
+
+

Methods to execute code on terminal

+
Format:
+
+(.venv) $ python3 ECLATDiffset.py <inputFile> <outputFile> <minSup>
+
+Example Usage:
+
+(.venv) $ python3 ECLATDiffset.py sampleDB.txt patterns.txt 10.0
+
+
+
+

Note

+

minSup will be considered in percentage of database transactions

+
+
+
+

Importing this algorithm into a python program

+
import PAMI.frequentPattern.basic.ECLATDiffset as alg
+
+obj = alg.ECLATDiffset(iFile, minSup)
+
+obj.mine()
+
+frequentPatterns = obj.getPatterns()
+
+print("Total number of Frequent Patterns:", len(frequentPatterns))
+
+obj.save(oFile)
+
+Df = obj.getPatternsAsDataFrame()
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+

Credits:

+
+

The complete program was written by Kundai under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS()[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

:return: returning RSS memory consumed by the mining process
:rtype: float

+
+ +
+
+getMemoryUSS()[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

:return: returning USS memory consumed by the mining process
:rtype: float

+
+ +
+
+getPatterns()[source]
+

Function to send the set of frequent patterns after completion of the mining process

:return: returning frequent patterns
:rtype: dict

+
+ +
+
+getPatternsAsDataFrame()[source]
+

Storing final frequent patterns in a dataframe

:return: returning frequent patterns in a dataframe
:rtype: pd.DataFrame

+
+ +
+
+getRuntime()[source]
+

Calculating the total amount of runtime taken by the mining process

:return: returning total amount of runtime taken by the mining process
:rtype: float

+
+ +
+
+mine()[source]
+

Frequent pattern mining process will start from here

+
+ +
+
+printResults()[source]
+

This function is used to print the results

+
+ +
+
+save(outFile)[source]
+

Complete set of frequent patterns will be loaded in to an output file

:param outFile: name of the output file
:type outFile: csvfile

+
+ +
+
+startMine()[source]
+

Frequent pattern mining process will start from here

+
+ +
+
+ +
+
+

PAMI.frequentPattern.basic.ECLATbitset module

+
+
+class PAMI.frequentPattern.basic.ECLATbitset.ECLATbitset(iFile, minSup, sep='\t')[source]
+

Bases: _frequentPatterns

+
+
Description:
+

ECLATbitset is one of the fundamental algorithms to discover frequent patterns in a transactional database.

+
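The bitset variant encodes each item's tidset as a bit vector, so support counting becomes a bitwise AND followed by a population count. A minimal sketch using Python integers as bitsets (illustrative, not PAMI's code):

# Bit i of an item's bitset is set iff the item occurs in transaction i.
db = [['a', 'b'], ['a', 'c'], ['a', 'b'], ['b', 'c']]
bitsets = {}
for tid, txn in enumerate(db):
    for item in txn:
        bitsets[item] = bitsets.get(item, 0) | (1 << tid)

ab = bitsets['a'] & bitsets['b']          # bitwise AND = tidset intersection
print(bin(ab), bin(ab).count('1'))        # support of {a, b} is 2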
+
Reference:
+

Mohammed Javeed Zaki: Scalable Algorithms for Association Mining. IEEE Trans. Knowl. Data Eng. 12(3): +372-390 (2000), https://ieeexplore.ieee.org/document/846291

+
+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine complete set of frequent patterns

  • +
  • oFile – str : +Name of the output file to store complete set of frequent patterns

  • +
  • minSup – int or float or str : The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup as expressed in count.

  • +
  • sep – str : This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator.

  • +
+
+
Attributes:
+
+
startTime : float

To record the start time of the mining process

endTime : float

To record the completion time of the mining process

finalPatterns : dict

Storing the complete set of patterns in a dictionary variable

memoryUSS : float

To store the total amount of USS memory consumed by the program

memoryRSS : float

To store the total amount of RSS memory consumed by the program

Database : list

To store the transactions of a database in list

+
+
+
+
+
+

Methods to execute code on terminal

+
Format:
+
+(.venv) $ python3 ECLATbitset.py <inputFile> <outputFile> <minSup>
+
+Example Usage:
+
+(.venv) $ python3 ECLATbitset.py sampleDB.txt patterns.txt 10.0
+
+
+
+

Note

+

minSup will be considered in percentage of database transactions

+
+
+
+

Importing this algorithm into a python program

+
import PAMI.frequentPattern.basic.ECLATbitset as alg
+
+obj = alg.ECLATbitset(iFile, minSup)
+
+obj.mine()
+
+frequentPatterns = obj.getPatterns()
+
+print("Total number of Frequent Patterns:", len(frequentPatterns))
+
+obj.save(oFile)
+
+Df = obj.getPatternsAsDataFrame()
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+

Credits:

+
+

The complete program was written by Yudai Masu under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS()[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

:return: returning RSS memory consumed by the mining process
:rtype: float

+
+ +
+
+getMemoryUSS()[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

:return: returning USS memory consumed by the mining process
:rtype: float

+
+ +
+
+getPatterns()[source]
+

Function to send the set of frequent patterns after completion of the mining process

:return: returning frequent patterns
:rtype: dict

+
+ +
+
+getPatternsAsDataFrame()[source]
+

Storing final frequent patterns in a dataframe

:return: returning frequent patterns in a dataframe
:rtype: pd.DataFrame

+
+ +
+
+getRuntime()[source]
+

Calculating the total amount of runtime taken by the mining process

:return: returning total amount of runtime taken by the mining process
:rtype: float

+
+ +
+
+mine() None[source]
+

Frequent pattern mining process will start from here (bitset implementation)

+
+ +
+
+printResults()[source]
+

This function is used to print the result

+
+ +
+
+save(outFile)[source]
+

Complete set of frequent patterns will be loaded in to an output file

:param outFile: name of the output file
:type outFile: file

+
+ +
+
+startMine()[source]
+

Frequent pattern mining process will start from here. We start by scanning the itemsets and storing their bitsets. We then form combinations of single items and check them against the minSup condition to determine the frequency of patterns.

+
+ +
+
+ +
+
+

PAMI.frequentPattern.basic.FPGrowth module

+
+
+class PAMI.frequentPattern.basic.FPGrowth.FPGrowth(iFile, minSup, sep='\t')[source]
+

Bases: _frequentPatterns

+
+
Description:
+

FPGrowth is one of the fundamental algorithms to discover frequent patterns in a transactional database. It stores the database in a compressed FP-tree, decreasing the memory usage, and extracts the patterns from the tree. It employs the downward closure property to reduce the search space effectively.

+
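The compression comes from inserting each transaction, with its items reordered by descending global frequency, along a shared prefix path, so common prefixes collapse into single counted nodes. A bare-bones sketch of that insertion step (the mining pass over conditional trees is omitted; illustrative only):

class Node:
    def __init__(self):
        self.count = 0
        self.children = {}

def buildFPTree(database):
    # Global frequencies decide the insertion order, maximizing shared prefixes.
    freq = {}
    for txn in database:
        for item in txn:
            freq[item] = freq.get(item, 0) + 1
    root = Node()
    for txn in database:
        node = root
        for item in sorted(set(txn), key=lambda i: (-freq[i], i)):
            node = node.children.setdefault(item, Node())
            node.count += 1
    return root

tree = buildFPTree([['a', 'b'], ['a', 'b', 'c'], ['a', 'c']])
print(tree.children['a'].count)   # 3: every transaction shares the prefix 'a'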
+
Reference:
+

Han, J., Pei, J., Yin, Y. et al. Mining Frequent Patterns without Candidate Generation: A Frequent-Pattern +Tree Approach. Data Mining and Knowledge Discovery 8, 53–87 (2004). https://doi.org/10.1023

+
+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine complete set of frequent patterns

  • +
  • oFile – str : +Name of the output file to store complete set of frequent patterns

  • +
  • minSup – int or float or str : The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup as expressed in count.

  • +
  • sep – str : This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator.

  • +
+
+
Attributes:
+
+
startTime : float

To record the start time of the mining process

endTime : float

To record the completion time of the mining process

finalPatterns : dict

Storing the complete set of patterns in a dictionary variable

memoryUSS : float

To store the total amount of USS memory consumed by the program

memoryRSS : float

To store the total amount of RSS memory consumed by the program

Database : list

To store the transactions of a database in list

mapSupport : Dictionary

To maintain the information of items and their frequency

lno : int

it represents the total number of transactions

tree : class

it represents the Tree class

finalPatterns : dict

it represents to store the patterns

+
+
+
+
+
+

Methods to execute code on terminal

+
Format:
+
+(.venv) $ python3 FPGrowth.py <inputFile> <outputFile> <minSup>
+
+Example Usage:
+
+(.venv) $ python3 FPGrowth.py sampleDB.txt patterns.txt 10.0
+
+
+
+

Note

+

minSup will be considered in percentage of database transactions

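A small sketch of how such a threshold is commonly interpreted (assumed behaviour inferred from the parameter description above, not PAMI source code):

def toCount(minSup, dbSize):
    # Integers are treated as absolute counts; floats as a proportion
    # of the database size (assumption for illustration).
    if isinstance(minSup, int):
        return minSup
    return int(minSup * dbSize)

assert toCount(10, 100) == 10    # count
assert toCount(0.1, 100) == 10   # proportion of database size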
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.frequentPattern.basic import FPGrowth as alg
+
+obj = alg.FPGrowth(iFile, minSup)
+
+obj.mine()
+
+frequentPatterns = obj.getPatterns()
+
+print("Total number of Frequent Patterns:", len(frequentPatterns))
+
+obj.save(oFile)
+
+Df = obj.getPatternsAsDataFrame()
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+

Credits:

+
+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function.
:return: returning RSS memory consumed by the mining process
:rtype: float

+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns() Dict[str, int][source]
+

Function to send the set of frequent patterns after completion of the mining process.
:return: returning frequent patterns
:rtype: dict

+
+ +
+
+getPatternsAsDataFrame() DataFrame[source]
+

Storing final frequent patterns in a dataframe

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine() None[source]
+

Main program to start the operation

+
+ +
+
+printResults() None[source]
+

This function is used to print the results

+
+ +
+
+save(outFile: str) None[source]
+

Complete set of frequent patterns will be loaded into an output file

+
+
Parameters:
+

outFile (csvfile) – name of the output file

+
+
Returns:
+

None

+
+
+
+ +
+
+startMine()[source]
+

Starting the mining process

+
+ +
+
+ +
+
+

PAMI.frequentPattern.basic.abstract module

+
+
+

Module contents

+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.frequentPattern.closed.html b/sphinx/_build/html/PAMI.frequentPattern.closed.html
new file mode 100644
index 000000000..c53c86565
--- /dev/null
+++ b/sphinx/_build/html/PAMI.frequentPattern.closed.html
@@ -0,0 +1,353 @@
+PAMI.frequentPattern.closed package — PAMI 2024.04.23 documentation

PAMI.frequentPattern.closed package

+
+

Submodules

+
+
+

PAMI.frequentPattern.closed.CHARM module

+
+
+class PAMI.frequentPattern.closed.CHARM.CHARM(iFile, minSup, sep='\t')[source]
+

Bases: _frequentPatterns

+
+
Description:
+

CHARM is an algorithm to discover closed frequent patterns in a transactional database. A frequent pattern is closed if there exists no superset with the same support count as the original itemset. This algorithm employs a depth-first search technique to find the complete set of closed frequent patterns in a transactional database.

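A toy check of the closed property (illustrative only; CHARM itself works on tid-list intersections during the search rather than a post-hoc filter):

supports = {("a",): 3, ("b",): 2, ("a", "b"): 2}

def isClosed(pattern):
    # A pattern is closed if no proper superset has the same support.
    return not any(set(pattern) < set(other) and sup == supports[pattern]
                   for other, sup in supports.items())

assert not isClosed(("b",))     # ("a", "b") has the same support, 2
assert isClosed(("a", "b"))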
+
+
Reference:
+

Mohammed J. Zaki and Ching-Jui Hsiao, CHARM: An Efficient Algorithm for Closed Itemset Mining, Proceedings of the 2002 SIAM, SDM. 2002, 457-473, https://doi.org/10.1137/1.9781611972726.27

+
+
Parameters:
+
    +
  • iFile – str : Name of the Input file to mine the complete set of frequent patterns

  • oFile – str : Name of the output file to store the complete set of frequent patterns

  • minSup – int or float or str : The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup as expressed in count.

  • sep – str : This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator.
+
+
Attributes:
+
+
startTime : float
    To record the start time of the mining process

endTime : float
    To record the completion time of the mining process

finalPatterns : dict
    Storing the complete set of patterns in a dictionary variable

memoryUSS : float
    To store the total amount of USS memory consumed by the program

memoryRSS : float
    To store the total amount of RSS memory consumed by the program

Database : list
    To store the transactions of a database in a list

mapSupport : Dictionary
    To maintain the information of items and their frequency

lno : int
    It represents the total number of transactions

tree : class
    It represents the Tree class

itemSetCount : int
    It represents the total number of patterns

tidList : dict
    Stores the timestamps of an item

hashing : dict
    Stores the patterns with their support to check for the closed property

+
+
+
+
+
+

Methods to execute code on terminal

+
Format:
+
+(.venv) $ python3 CHARM.py <inputFile> <outputFile> <minSup>
+
+Example Usage:
+
+(.venv) $ python3 CHARM.py sampleDB.txt patterns.txt 10.0
+
+
+
+

Note

+

minSup will be considered in percentage of database transactions

+
+
+
+

Importing this algorithm into a python program

+
from PAMI.frequentPattern.closed import CHARM as alg
+
+obj = alg.CHARM(iFile, minSup)
+
+obj.mine()
+
+frequentPatterns = obj.getPatterns()
+
+print("Total number of Closed Frequent Patterns:", len(frequentPatterns))
+
+obj.save(oFile)
+
+Df = obj.getPatternsAsDataFrame()
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+

Credits:

+
+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS()[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function.
:return: returning RSS memory consumed by the mining process
:rtype: float

+
+ +
+
+getMemoryUSS()[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function.
:return: returning USS memory consumed by the mining process
:rtype: float

+
+ +
+
+getPatterns()[source]
+

Function to send the set of frequent patterns after completion of the mining process.
:return: returning frequent patterns
:rtype: dict

+
+ +
+
+getPatternsAsDataFrame()[source]
+

Storing final frequent patterns in a dataframe.
:return: returning frequent patterns in a dataframe
:rtype: pd.DataFrame

+
+ +
+
+getRuntime()[source]
+

Calculating the total amount of runtime taken by the mining process.
:return: returning total amount of runtime taken by the mining process
:rtype: float

+
+ +
+
+mine()[source]
+

Mining process will start from here by extracting the frequent patterns from the database. It performs prefix equivalence to generate the combinations and closed frequent patterns.

+
+ +
+
+printResults()[source]
+

This function is used to print the results

+
+ +
+
+save(outFile)[source]
+

Complete set of frequent patterns will be loaded into an output file.
:param outFile: name of the output file
:type outFile: csvfile

+
+ +
+
+startMine()[source]
+

Mining process will start from here by extracting the frequent patterns from the database. It performs prefix equivalence to generate the combinations and closed frequent patterns.

+
+ +
+
+ +
+
+

PAMI.frequentPattern.closed.abstract module

+
+
+

Module contents

+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.frequentPattern.cuda.html b/sphinx/_build/html/PAMI.frequentPattern.cuda.html
new file mode 100644
index 000000000..535cbcc8d
--- /dev/null
+++ b/sphinx/_build/html/PAMI.frequentPattern.cuda.html
@@ -0,0 +1,194 @@
+PAMI.frequentPattern.cuda package — PAMI 2024.04.23 documentation

PAMI.frequentPattern.cuda package

+
+

Submodules

+
+
+

PAMI.frequentPattern.cuda.abstract module

+
+
+

PAMI.frequentPattern.cuda.cuApriori module

+
+
+

PAMI.frequentPattern.cuda.cuAprioriBit module

+
+
+

PAMI.frequentPattern.cuda.cuEclat module

+
+
+

PAMI.frequentPattern.cuda.cuEclatBit module

+
+
+

PAMI.frequentPattern.cuda.cudaAprioriGCT module

+
+
+

PAMI.frequentPattern.cuda.cudaAprioriTID module

+
+
+

PAMI.frequentPattern.cuda.cudaEclatGCT module

+
+
+

Module contents

+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.frequentPattern.html b/sphinx/_build/html/PAMI.frequentPattern.html
new file mode 100644
index 000000000..98d1223cb
--- /dev/null
+++ b/sphinx/_build/html/PAMI.frequentPattern.html
@@ -0,0 +1,339 @@
+PAMI.frequentPattern package — PAMI 2024.04.23 documentation

PAMI.frequentPattern package

+
+

Subpackages

+
+ +
+
+
+

Module contents

+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.frequentPattern.maximal.html b/sphinx/_build/html/PAMI.frequentPattern.maximal.html
new file mode 100644
index 000000000..942582d0c
--- /dev/null
+++ b/sphinx/_build/html/PAMI.frequentPattern.maximal.html
@@ -0,0 +1,349 @@
+PAMI.frequentPattern.maximal package — PAMI 2024.04.23 documentation

PAMI.frequentPattern.maximal package

+
+

Submodules

+
+
+

PAMI.frequentPattern.maximal.MaxFPGrowth module

+
+
+class PAMI.frequentPattern.maximal.MaxFPGrowth.MaxFPGrowth(iFile, minSup, sep='\t')[source]
+

Bases: _frequentPatterns

+
+
Description:
+

MaxFP-Growth is one of the fundamental algorithms to discover maximal frequent patterns in a transactional database.

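A toy maximality check (illustrative; not MaxFP-Growth's internal tree-based search):

frequent = {("a",), ("b",), ("a", "b")}

def isMaximal(pattern):
    # A frequent pattern is maximal if none of its proper supersets is frequent.
    return not any(set(pattern) < set(other) for other in frequent)

assert isMaximal(("a", "b"))
assert not isMaximal(("a",))    # ("a", "b") is a frequent superset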
+
+
Reference:
+

Grahne, G. and Zhu, J., “High Performance Mining of Maximal Frequent itemSets”, http://users.encs.concordia.ca/~grahne/papers/hpdm03.pdf

+
+
Parameters:
+
    +
  • iFile – str : Name of the Input file to mine the complete set of frequent patterns

  • oFile – str : Name of the output file to store the complete set of frequent patterns

  • minSup – int or float or str : The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup as expressed in count.

  • maxPer – float : The user can specify maxPer in count or proportion of database size. If the program detects the data type of maxPer is integer, then it treats maxPer as expressed in count.

  • sep – str : This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator.
+
+
Attributes:
+
+
startTime : float
    To record the start time of the mining process

endTime : float
    To record the completion time of the mining process

finalPatterns : dict
    Storing the complete set of patterns in a dictionary variable

memoryUSS : float
    To store the total amount of USS memory consumed by the program

memoryRSS : float
    To store the total amount of RSS memory consumed by the program

Database : list
    To store the transactions of a database in a list

mapSupport : Dictionary
    To maintain the information of items and their frequency

lno : int
    It represents the total number of transactions

tree : class
    It represents the Tree class

itemSetCount : int
    It represents the total number of patterns

+
+
+
+
+
+

Methods to execute code on terminal

+
Format:
+
+(.venv) $ python3 MaxFPGrowth.py <inputFile> <outputFile> <minSup>
+
+Example Usage:
+
+(.venv) $ python3 MaxFPGrowth.py sampleDB.txt patterns.txt 0.3
+
+
+
+

Note

+

minSup will be considered in percentage of database transactions

+
+
+
+

Importing this algorithm into a python program

+
from PAMI.frequentPattern.maximal import MaxFPGrowth as alg
+
+obj = alg.MaxFPGrowth("../basic/sampleTDB.txt", "2")
+
+obj.mine()
+
+frequentPatterns = obj.getPatterns()
+
+print("Total number of Frequent Patterns:", len(frequentPatterns))
+
+obj.save("patterns")
+
+Df = obj.getPatternsAsDataFrame()
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+

Credits:

+
+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS()[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function.
:return: returning RSS memory consumed by the mining process
:rtype: float

+
+ +
+
+getMemoryUSS()[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function.
:return: returning USS memory consumed by the mining process
:rtype: float

+
+ +
+
+getPatterns()[source]
+

Function to send the set of frequent patterns after completion of the mining process.
:return: returning frequent patterns
:rtype: dict

+
+ +
+
+getPatternsAsDataFrame()[source]
+

Storing final frequent patterns in a dataframe.
:return: returning frequent patterns in a dataframe
:rtype: pd.DataFrame

+
+ +
+
+getRuntime()[source]
+

Calculating the total amount of runtime taken by the mining process.
:return: returning total amount of runtime taken by the mining process
:rtype: float

+
+ +
+
+mine()[source]
+

Mining process will start from this function

+
+ +
+
+printResults()[source]
+

This function is used to print the results

+
+ +
+
+save(outFile)[source]
+

Complete set of frequent patterns will be loaded into an output file.
:param outFile: name of the output file
:type outFile: csvfile

+
+ +
+
+startMine()[source]
+

Mining process will start from this function

+
+ +
+
+ +
+
+

PAMI.frequentPattern.maximal.abstract module

+
+
+

Module contents

+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.frequentPattern.pyspark.html b/sphinx/_build/html/PAMI.frequentPattern.pyspark.html
new file mode 100644
index 000000000..f7939e520
--- /dev/null
+++ b/sphinx/_build/html/PAMI.frequentPattern.pyspark.html
@@ -0,0 +1,182 @@
+PAMI.frequentPattern.pyspark package — PAMI 2024.04.23 documentation

PAMI.frequentPattern.pyspark package

+
+

Submodules

+
+
+

PAMI.frequentPattern.pyspark.abstract module

+
+
+

PAMI.frequentPattern.pyspark.parallelApriori module

+
+
+

PAMI.frequentPattern.pyspark.parallelECLAT module

+
+
+

PAMI.frequentPattern.pyspark.parallelFPGrowth module

+
+
+

Module contents

+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.frequentPattern.topk.html b/sphinx/_build/html/PAMI.frequentPattern.topk.html
new file mode 100644
index 000000000..f35bf994d
--- /dev/null
+++ b/sphinx/_build/html/PAMI.frequentPattern.topk.html
@@ -0,0 +1,372 @@
+PAMI.frequentPattern.topk package — PAMI 2024.04.23 documentation

PAMI.frequentPattern.topk package

+
+

Submodules

+
+
+

PAMI.frequentPattern.topk.FAE module

+
+
+class PAMI.frequentPattern.topk.FAE.FAE(iFile, k, sep='\t')[source]
+

Bases: _frequentPatterns

+
+
Description:
+

Top-K is an algorithm to discover the top-k frequent patterns in a transactional database.

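A toy sketch of the top-k selection idea (illustrative; FAE prunes candidates during mining rather than filtering afterwards):

import heapq

supports = {"a": 5, "b": 3, "c": 4, "d": 1}
k = 2
# Keep the k patterns with the highest support.
topK = heapq.nlargest(k, supports.items(), key=lambda kv: kv[1])
print(topK)   # [('a', 5), ('c', 4)]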
+
+
Reference:
+

Zhi-Hong Deng, Guo-Dong Fang: Mining Top-Rank-K Frequent Patterns: DOI: 10.1109/ICMLC.2007.4370261 · Source: IEEE Xplore, https://ieeexplore.ieee.org/document/4370261

+
+
Parameters:
+
    +
  • iFile – str : Name of the Input file to mine the complete set of frequent patterns

  • oFile – str : Name of the output file to store the complete set of frequent patterns

  • k – int : User specified count of top frequent patterns

  • minimum – int : Minimum number of frequent patterns to consider in analysis

  • sep – str : This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator.
+
+
Attributes:
+
+
startTime : float
    To record the start time of the mining process

endTime : float
    To record the completion time of the mining process

finalPatterns : dict
    Storing the complete set of patterns in a dictionary variable

memoryUSS : float
    To store the total amount of USS memory consumed by the program

memoryRSS : float
    To store the total amount of RSS memory consumed by the program

+
+
+
+
+
+

Methods to execute code on terminal

+
Format:
+
+(.venv) $ python3 FAE.py <inputFile> <outputFile> <K>
+
+Example Usage:
+
+(.venv) $ python3 FAE.py sampleDB.txt patterns.txt 10
+
+
+
+

Note

+

k will be considered as the count of top frequent patterns to consider in the analysis

+
+
+
+

Importing this algorithm into a python program

+
import PAMI.frequentPattern.topK.FAE as alg
+
+obj = alg.FAE(iFile, K)
+
+obj.mine()
+
+topKFrequentPatterns = obj.getPatterns()
+
+print("Total number of Frequent Patterns:", len(topKFrequentPatterns))
+
+obj.save(oFile)
+
+Df = obj.getPatternsAsDataFrame()
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+

Credits:

+
+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS()[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS()[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns()[source]
+

Function to send the set of frequent patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame()[source]
+

Storing final frequent patterns in a dataframe

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime()[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine()[source]
+

Main function of the program

+
+ +
+
+printTOPK()[source]
+

This function is used to print the results

+
+ +
+
+save(outFile)[source]
+

Complete set of frequent patterns will be loaded into an output file

+
+
Parameters:
+

outFile (file) – name of the output file

+
+
+
+ +
+
+startMine()[source]
+

Main function of the program

+
+ +
+
+ +
+
+

PAMI.frequentPattern.topk.abstract module

+
+
+

Module contents

+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.fuzzyCorrelatedPattern.basic.html b/sphinx/_build/html/PAMI.fuzzyCorrelatedPattern.basic.html
new file mode 100644
index 000000000..268cf7e6b
--- /dev/null
+++ b/sphinx/_build/html/PAMI.fuzzyCorrelatedPattern.basic.html
@@ -0,0 +1,447 @@
+PAMI.fuzzyCorrelatedPattern.basic package — PAMI 2024.04.23 documentation

PAMI.fuzzyCorrelatedPattern.basic package

+
+

Submodules

+
+
+

PAMI.fuzzyCorrelatedPattern.basic.FCPGrowth module

+
+
+class PAMI.fuzzyCorrelatedPattern.basic.FCPGrowth.Element(tid: int, IUtil: float, RUtil: float)[source]
+

Bases: object

+

A class that represents an element of a fuzzy list

+
+
Attributes:
+
+
tid : int
    Keeps track of the transaction id

IUtil : float
    The utility of a fuzzy item in the transaction

RUtil : float
    The neighbourhood resting value of a fuzzy item in the transaction

+
+
+
+
+
+ +
+
+class PAMI.fuzzyCorrelatedPattern.basic.FCPGrowth.FCPGrowth(iFile: str, minSup: int, minAllConf: float, sep: str = '\t')[source]
+

Bases: _corelatedFuzzyFrequentPatterns

+
+
Description:
+

FCPGrowth is an algorithm to discover correlated fuzzy-frequent patterns in a transactional database. It is based on traditional fuzzy frequent pattern mining.

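A toy all-confidence computation (the names below are invented for illustration and are not FCPGrowth's internal API):

itemSupport = {"a": 5, "b": 4}

def allConfidence(pattern, patternSupport):
    # all-confidence = support(pattern) / max support among its items.
    return patternSupport / max(itemSupport[i] for i in pattern)

# The pattern ("a", "b") with support 3 passes a minAllConf of 0.6: 3/5 = 0.6.
print(allConfidence(("a", "b"), 3))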
+
+
Reference:
+

Lin, N.P., & Chueh, H. (2007). Fuzzy correlation rules mining. https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.416.6053&rep=rep1&type=pdf

+
+
Parameters:
+
    +
  • iFile – str : Name of the Input file to mine the complete set of frequent patterns

  • oFile – str : Name of the output file to store the complete set of frequent patterns

  • minSup – int or float or str : The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup as expressed in count. Otherwise, it will be treated as float.

  • maxPer – float : The user can specify maxPer in count or proportion of database size. If the program detects the data type of maxPer is integer, then it treats maxPer as expressed in count.

  • minAllConf – float : The user can specify minAllConf values within the range (0, 1).

  • sep – str : This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator.
+
+
Attributes:
+
+
iFile : file
    Name of the input file to mine the complete set of fuzzy spatial frequent patterns

oFile : file
    Name of the oFile file to store the complete set of fuzzy spatial frequent patterns

minSup : int
    The user given support

minAllConf : float
    User specified minAllConf (should be in the range 0 and 1)

memoryRSS : float
    To store the total amount of RSS memory consumed by the program

startTime : float
    To record the start time of the mining process

endTime : float
    To record the completion time of the mining process

itemsCnt : int
    To record the number of fuzzy spatial itemSets generated

mapItemsLowSum : map
    To keep track of low region values of items

mapItemsMidSum : map
    To keep track of middle region values of items

mapItemsHighSum : map
    To keep track of high region values of items

mapItemSum : map
    To keep track of the sum of fuzzy values of items

mapItemRegions : map
    To keep track of fuzzy regions of items

jointCnt : int
    To keep track of the number of FFI-lists that were constructed

BufferSize : int
    Represents the size of the buffer

itemBuffer : list
    To keep track of items in the buffer

+
+
+
+
Methods:
+
+
startMine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of frequent patterns will be loaded into an output file

+
+
getPatternsAsDataFrame()

Complete set of frequent patterns will be loaded into a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
getRatio(self, prefix, prefixLen, item)

Method to calculate the ratio of an itemSet

+
+
convert(value):

To convert the given user specified value

+
+
FSFIMining( prefix, prefixLen, fsFim, minSup)

Method to generate FFI from a prefix

+
+
construct(px, py)

A function to construct Fuzzy itemSet from 2 fuzzy itemSets

+
+
findElementWithTID(uList, tid)

To find element with same tid as given

+
+
WriteOut(prefix, prefixLen, item, sumIUtil,ratio)

To store the pattern

+
+
+
+
+
+

Executing the code on terminal:

+
Format:
+
+(.venv) $ python3 FCPGrowth.py <inputFile> <outputFile> <minSup> <minAllConf> <sep>
+
+Example Usage:
+
+(.venv) $ python3 FCPGrowth.py sampleTDB.txt output.txt 2 0.2
+
+
+
+

Note

+

minSup will be considered in percentage of database transactions

+
+
+
+

Sample run of importing the code:

+
from PAMI.fuzzyCorrelatedPattern.basic import FCPGrowth as alg
+
+obj = alg.FCPGrowth("input.txt",2,0.4)
+
+obj.mine()
+
+correlatedFuzzyFrequentPatterns = obj.getPatterns()
+
+print("Total number of Correlated Fuzzy Frequent Patterns:", len(correlatedFuzzyFrequentPatterns))
+
+obj.save("output")
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+

Credits:

+
+

The complete program was written by B.Sai Chitra under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns() Dict[str, List[float]][source]
+

Function to send the set of frequent patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame() DataFrame[source]
+

Storing final frequent patterns in a dataframe

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine() None[source]
+

Frequent pattern mining process will start from here

+
+ +
+
+printResults() None[source]
+

This function is used to print the results

+
+ +
+
+save(outFile: str) None[source]
+

Complete set of frequent patterns will be loaded into an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
+
+ +
+
+startMine() None[source]
+

Frequent pattern mining process will start from here

+
+ +
+
+ +
+
+PAMI.fuzzyCorrelatedPattern.basic.FCPGrowth.main()[source]
+
+ +
+
+

PAMI.fuzzyCorrelatedPattern.basic.abstract module

+
+
+

Module contents

+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.fuzzyCorrelatedPattern.html b/sphinx/_build/html/PAMI.fuzzyCorrelatedPattern.html
new file mode 100644
index 000000000..90464ef8e
--- /dev/null
+++ b/sphinx/_build/html/PAMI.fuzzyCorrelatedPattern.html
@@ -0,0 +1,196 @@
+PAMI.fuzzyCorrelatedPattern package — PAMI 2024.04.23 documentation
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.fuzzyFrequentPattern.basic.html b/sphinx/_build/html/PAMI.fuzzyFrequentPattern.basic.html
new file mode 100644
index 000000000..4498af412
--- /dev/null
+++ b/sphinx/_build/html/PAMI.fuzzyFrequentPattern.basic.html
@@ -0,0 +1,669 @@
+PAMI.fuzzyFrequentPattern.basic package — PAMI 2024.04.23 documentation

PAMI.fuzzyFrequentPattern.basic package

+
+

Submodules

+
+
+

PAMI.fuzzyFrequentPattern.basic.FFIMiner module

+
+
+class PAMI.fuzzyFrequentPattern.basic.FFIMiner.FFIMiner(iFile: str, minSup: float, sep: str = '\t')[source]
+

Bases: _fuzzyFrequentPattenrs

+
+
Description:
+

Fuzzy Frequent Pattern-Miner is designed to find all frequent fuzzy patterns, which is a non-trivial and challenging problem due to its huge search space. Efficient pruning techniques are used to reduce the search space.

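A toy fuzzy-support computation (the membership values and names below are invented for illustration, not FFIMiner's internals):

# Each transaction maps an item to a fuzzy membership value in [0, 1].
transactions = [{"a": 0.8, "b": 0.5}, {"a": 0.6}, {"a": 0.9, "b": 0.7}]

def fuzzySupport(pattern):
    # Sum, over transactions containing the pattern, of the minimum
    # membership among the pattern's items.
    return sum(min(t[i] for i in pattern)
               for t in transactions if all(i in t for i in pattern))

print(fuzzySupport(("a", "b")))   # min(0.8, 0.5) + min(0.9, 0.7) = 1.2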
+
+
Reference:
+

Lin, Chun-Wei & Li, Ting & Fournier Viger, Philippe & Hong, Tzung-Pei. (2015). A fast Algorithm for mining fuzzy frequent itemsets. Journal of Intelligent & Fuzzy Systems. 29. 2373-2379. 10.3233/IFS-151936. https://www.researchgate.net/publication/286510908_A_fast_Algorithm_for_mining_fuzzy_frequent_itemSets

+
+
Parameters:
+
    +
  • iFile – str : Name of the Input file to mine the complete set of frequent patterns

  • oFile – str : Name of the output file to store the complete set of frequent patterns

  • minSup – int or float or str : The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup as expressed in count. Otherwise, it will be treated as float.

  • maxPer – float : The user can specify maxPer in count or proportion of database size. If the program detects the data type of maxPer is integer, then it treats maxPer as expressed in count.

  • fuzFile – str : The user can specify fuzFile.

  • sep – str : This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator.
+
+
Attributes:
+
+
iFile : string
    Name of the input file to mine the complete set of fuzzy frequent patterns

fmFile : string
    Name of the fuzzy membership file to mine the complete set of fuzzy frequent patterns

oFile : string
    Name of the oFile file to store the complete set of fuzzy frequent patterns

minSup : float
    The user given minimum support

memoryRSS : float
    To store the total amount of RSS memory consumed by the program

startTime : float
    To record the start time of the mining process

endTime : float
    To record the completion time of the mining process

itemsCnt : int
    To record the number of fuzzy spatial itemSets generated

mapItemSum : map
    To keep track of the sum of fuzzy values of items

joinsCnt : int
    To keep track of the number of ffi-lists that were constructed

BufferSize : int
    Represents the size of the buffer

itemSetBuffer : list
    To keep track of items in the buffer

+
+
+
+
Methods:
+
+
mine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of frequent patterns will be loaded into an output file

+
+
getPatternsAsDataFrame()

Complete set of frequent patterns will be loaded into a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
convert(value)

To convert the given user specified value

+
+
compareItems(o1, o2)

A function that sorts all ffi-lists in ascending order of support

+
+
FSFIMining(prefix, prefixLen, FSFIM, minSup)

Method to generate ffi from a prefix

+
+
construct(px, py)

A function to construct Fuzzy itemSet from 2 fuzzy itemSets

+
+
findElementWithTID(uList, tid)

To find element with same tid as given

+
+
WriteOut(prefix, prefixLen, item, sumIUtil)

To store the pattern

+
+
+
+
+
+

Executing the code on terminal:

+
Format:
+
+(.venv) $ python3 FFIMiner.py <inputFile> <outputFile> <minSup> <separator>
+
+Example Usage:
+
+(.venv) $ python3  FFIMiner.py sampleTDB.txt output.txt 6
+
+
+
+

Note

+

minSup will be considered in percentage of database transactions

+
+
+
+

Sample run of importing the code:

+
from PAMI.fuzzyFrequentPattern import FFIMiner as alg
+
+obj = alg.FFIMiner("input.txt", 2)
+
+obj.mine()
+
+fuzzyFrequentPattern = obj.getPatterns()
+
+print("Total number of Fuzzy Frequent Patterns:", len(fuzzyFrequentPattern))
+
+obj.save("outputFile")
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+

Credits:

+
+

The complete program was written by B.Sai Chitra under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns() dict[source]
+

Function to send the set of frequent patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame() DataFrame[source]
+

Storing final frequent patterns in a dataframe

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine() None[source]
+

fuzzy-Frequent pattern mining process will start from here

+
+ +
+
+printResults() None[source]
+

This function is used to print the results

+
+ +
+
+save(outFile) dict[source]
+

Complete set of frequent patterns will be loaded into an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
Returns:
+

dictionary of frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+startMine() None[source]
+

fuzzy-Frequent pattern mining process will start from here

+
+ +
+
+ +
+
+

PAMI.fuzzyFrequentPattern.basic.FFIMiner_old module

+
+
+class PAMI.fuzzyFrequentPattern.basic.FFIMiner_old.FFIMiner(iFile: str, fuzFile: str, minSup: float, sep: str = '\t')[source]
+

Bases: _fuzzyFrequentPattenrs

+
+
Description:
+

Fuzzy Frequent Pattern-Miner is designed to find all frequent fuzzy patterns, which is a non-trivial and challenging problem due to its huge search space. Efficient pruning techniques are used to reduce the search space.

+
+
Reference:
+

Lin, Chun-Wei & Li, Ting & Fournier Viger, Philippe & Hong, Tzung-Pei. (2015). A fast Algorithm for mining fuzzy frequent itemsets. Journal of Intelligent & Fuzzy Systems. 29. 2373-2379. 10.3233/IFS-151936. https://www.researchgate.net/publication/286510908_A_fast_Algorithm_for_mining_fuzzy_frequent_itemSets

+
+
Parameters:
+
    +
  • iFile – str : Name of the Input file to mine the complete set of frequent patterns

  • oFile – str : Name of the output file to store the complete set of frequent patterns

  • minSup – int or float or str : The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup as expressed in count. Otherwise, it will be treated as float.

  • maxPer – float : The user can specify maxPer in count or proportion of database size. If the program detects the data type of maxPer is integer, then it treats maxPer as expressed in count.

  • fuzFile – str : The user can specify fuzFile.

  • sep – str : This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator.
+
+
Attributes:
+
+
iFile : string
    Name of the input file to mine the complete set of fuzzy frequent patterns

fmFile : string
    Name of the fuzzy membership file to mine the complete set of fuzzy frequent patterns

oFile : string
    Name of the oFile file to store the complete set of fuzzy frequent patterns

minSup : float
    The user given minimum support

memoryRSS : float
    To store the total amount of RSS memory consumed by the program

startTime : float
    To record the start time of the mining process

endTime : float
    To record the completion time of the mining process

itemsCnt : int
    To record the number of fuzzy spatial itemSets generated

mapItemsLowSum : map
    To keep track of low region values of items

mapItemsMidSum : map
    To keep track of middle region values of items

mapItemsHighSum : map
    To keep track of high region values of items

mapItemSum : map
    To keep track of the sum of fuzzy values of items

mapItemRegions : map
    To keep track of fuzzy regions of items

jointCnt : int
    To keep track of the number of ffi-lists that were constructed

BufferSize : int
    Represents the size of the buffer

itemBuffer : list
    To keep track of items in the buffer

+
+
+
+
Methods:
+
+
mine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of frequent patterns will be loaded into an output file

+
+
getPatternsAsDataFrame()

Complete set of frequent patterns will be loaded into a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
convert(value)

To convert the given user specified value

+
+
compareItems(o1, o2)

A function that sorts all ffi-lists in ascending order of support

+
+
FSFIMining(prefix, prefixLen, FSFIM, minSup)

Method to generate ffi from a prefix

+
+
construct(px, py)

A function to construct Fuzzy itemSet from 2 fuzzy itemSets

+
+
findElementWithTID(uList, tid)

To find element with same tid as given

+
+
WriteOut(prefix, prefixLen, item, sumIUtil)

To store the pattern

+
+
+
+
+
+

Executing the code on terminal:

+
Format:
+
+(.venv) $ python3 FFIMinerMiner.py <inputFile> <outputFile> <minSup> <separator>
+
+Example Usage:
+
+(.venv) $ python3  FFIMinerMiner.py sampleTDB.txt output.txt 6
+
+(.venv) $ python3  FFIMinerMiner.py sampleTDB.txt output.txt 0.3
+
+
+
+

Note

+

minSup will be considered in percentage of database transactions

+
+
+
+

Sample run of importing the code:

+
+

from PAMI.fuzzyFrequentPattern import FFIMiner as alg
+
+obj = alg.FFIMiner("input.txt", "fuzzyMembership.txt", 2)
+
+obj.mine()
+
+fuzzyFrequentPattern = obj.getPatterns()
+
+print("Total number of Fuzzy Frequent Patterns:", len(fuzzyFrequentPattern))
+
+obj.save("outputFile")
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS:", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)

+
+
+
+

Credits:

+
+

The complete program was written by B.Sai Chitra under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns() Dict[str, str][source]
+

Function to send the set of frequent patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame() DataFrame[source]
+

Storing final frequent patterns in a dataframe

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine() None[source]
+

fuzzy-Frequent pattern mining process will start from here

+
+ +
+
+printResults() None[source]
+

This function is used to print the results

+
+ +
+
+save(outFile: str) None[source]
+

Complete set of frequent patterns will be loaded into an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
Returns:
+

None

+
+
+
+ +
+
+startMine() None[source]
+

fuzzy-Frequent pattern mining process will start from here

+
+ +
+
+ +
+
+

PAMI.fuzzyFrequentPattern.basic.abstract module

+
+
+

Module contents

+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.fuzzyFrequentPattern.html b/sphinx/_build/html/PAMI.fuzzyFrequentPattern.html
new file mode 100644
index 000000000..d07bacec4
--- /dev/null
+++ b/sphinx/_build/html/PAMI.fuzzyFrequentPattern.html
@@ -0,0 +1,209 @@
+PAMI.fuzzyFrequentPattern package — PAMI 2024.04.23 documentation
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.fuzzyGeoreferencedFrequentPattern.basic.html b/sphinx/_build/html/PAMI.fuzzyGeoreferencedFrequentPattern.basic.html
new file mode 100644
index 000000000..f48216c60
--- /dev/null
+++ b/sphinx/_build/html/PAMI.fuzzyGeoreferencedFrequentPattern.basic.html
@@ -0,0 +1,679 @@
+PAMI.fuzzyGeoreferencedFrequentPattern.basic package — PAMI 2024.04.23 documentation

PAMI.fuzzyGeoreferencedFrequentPattern.basic package

+
+

Submodules

+
+
+

PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner module

+
+
+class PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner.FFSPMiner(iFile: str, nFile: str, minSup: float, sep: str = '\t')[source]
+

Bases: _fuzzySpatialFrequentPatterns

+
+
Description:
+

Fuzzy Frequent Spatial Pattern-Miner is designed to find all spatially frequent fuzzy patterns, which is a non-trivial and challenging problem due to its huge search space. Efficient pruning techniques are used to reduce the search space.

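A toy sketch of the neighbourhood constraint (names invented for illustration; not FFSPMiner's internal API): when two itemSets are joined, only their common spatial neighbours can support the combined pattern.

neighbours = {"a": {"b", "c"}, "b": {"a", "c"}, "c": {"a", "b", "d"}}

def commonNeighbours(x, y):
    # Only items in both neighbourhoods can extend the joined pattern.
    return neighbours[x] & neighbours[y]

print(commonNeighbours("a", "b"))   # {'c'}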
+
+
Reference:
+

P. Veena, B. S. Chithra, R. U. Kiran, S. Agarwal and K. Zettsu, “Discovering Fuzzy Frequent Spatial Patterns in Large Quantitative Spatiotemporal databases,” 2021 IEEE International Conference on Fuzzy Systems (FUZZ-IEEE), 2021, pp. 1-8, doi: 10.1109/FUZZ45933.2021.9494594.

+
+
Parameters:
+
    +
  • iFile – str : Name of the Input file to mine the complete set of frequent patterns

  • oFile – str : Name of the output file to store the complete set of frequent patterns

  • minSup – int or float or str : The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup as expressed in count. Otherwise, it will be treated as float.

  • maxPer – float : The user can specify maxPer in count or proportion of database size. If the program detects the data type of maxPer is integer, then it treats maxPer as expressed in count.

  • nFile – str : Name of the input file to mine the complete set of frequent patterns

  • sep – str : This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator.
+
+
Attributes:
+
+
iFile : file
    Name of the input file to mine the complete set of fuzzy spatial frequent patterns

oFile : file
    Name of the oFile file to store the complete set of fuzzy spatial frequent patterns

minSup : float
    The user given minimum support

neighbors : map
    To keep track of neighbours of elements

memoryRSS : float
    To store the total amount of RSS memory consumed by the program

startTime : float
    To record the start time of the mining process

endTime : float
    To record the completion time of the mining process

itemsCnt : int
    To record the number of fuzzy spatial itemSets generated

mapItemSum : map
    To keep track of the sum of fuzzy values of items

mapItemRegions : map
    To keep track of fuzzy regions of items

joinsCnt : int
    To keep track of the number of FFI-lists that were constructed

BufferSize : int
    Represents the size of the buffer

itemSetBuffer : list
    To keep track of items in the buffer

+
+
+
+
Methods:
+
+
mine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of frequent patterns will be loaded into an output file

+
+
getPatternsAsDataFrame()

Complete set of frequent patterns will be loaded into a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
convert(value)

To convert the given user specified value

+
+
FSFIMining( prefix, prefixLen, fsFim, minSup)

Method to generate FFI from a prefix

+
+
construct(px, py)

A function to construct Fuzzy itemSet from 2 fuzzy itemSets

+
+
Intersection(neighbourX,neighbourY)

Return common neighbours of 2 itemSet Neighbours

+
+
findElementWithTID(uList, tid)

To find element with same tid as given

+
+
WriteOut(prefix, prefixLen, item, sumIUtil,period)

To store the pattern

+
+
+
+
+
+

Executing the code on terminal:

+
Format:
+
+(.venv) $ python3 FFSPMiner.py <inputFile> <outputFile> <neighbours> <minSup> <sep>
+
+Example Usage:
+
+(.venv) $ python3  FFSPMiner.py sampleTDB.txt output.txt sampleN.txt 3
+
+
+
+

Note

+

minSup will be considered in percentage of database transactions

+
+
+
+

Sample run of importing the code:

+
from PAMI.fuzzyGeoreferencedFrequentPattern import FFSPMiner as alg
+
+obj = alg.FFSPMiner("input.txt", "neighbours.txt", 2)
+
+obj.mine()
+
+fuzzySpatialFrequentPatterns = obj.getPatterns()
+
+print("Total number of fuzzy frequent spatial patterns:", len(fuzzySpatialFrequentPatterns))
+
+obj.save("outputFile")
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+

Credits:

+
+

The complete program was written by B.Sai Chitra under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns() Dict[str, str][source]
+

Function to send the set of frequent patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame() DataFrame[source]
+

Storing final frequent patterns in a dataframe

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine() None[source]
+

Frequent pattern mining process will start from here

+
+
Returns:
+

None

+
+
+
+ +
+
+printResults() None[source]
+

This function is used to print the results

+
+ +
+
+save(outFile: str) None[source]
+

Complete set of frequent patterns will be loaded into an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
Returns:
+

None

+
+
+
+ +
+
+startMine() None[source]
+

Frequent pattern mining process will start from here

+
+
Returns:
+

None

+
+
+
+ +
+
+ +
+
+

PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner_old module

+
+
+class PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner_old.FFSPMiner(iFile, nFile, minSup, sep='\t')[source]
+

Bases: _fuzzySpatialFrequentPatterns

+
+
Description:
+

Fuzzy Frequent Spatial Pattern-Miner is designed to find all spatially frequent fuzzy patterns, which is a non-trivial and challenging problem due to its huge search space. Efficient pruning techniques are used to reduce the search space.

+
+
+
+
Reference:

P. Veena, B. S. Chithra, R. U. Kiran, S. Agarwal and K. Zettsu, “Discovering Fuzzy Frequent Spatial Patterns in Large Quantitative Spatiotemporal databases,” 2021 IEEE International Conference on Fuzzy Systems (FUZZ-IEEE), 2021, pp. 1-8, doi: 10.1109/FUZZ45933.2021.9494594.

+
+
+
+
Parameters:
+
    +
  • iFile – str : Name of the Input file to mine the complete set of frequent patterns

  • oFile – str : Name of the output file to store the complete set of frequent patterns

  • minSup – int or float or str : The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup as expressed in count. Otherwise, it will be treated as float.

  • maxPer – float : The user can specify maxPer in count or proportion of database size. If the program detects the data type of maxPer is integer, then it treats maxPer as expressed in count.

  • nFile – str : Name of the input file to mine the complete set of frequent patterns

  • sep – str : This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator.
+
+
Attributes:
+
+
iFile : file
    Name of the input file to mine the complete set of fuzzy spatial frequent patterns

oFile : file
    Name of the oFile file to store the complete set of fuzzy spatial frequent patterns

minSup : float
    The user given minimum support

neighbors : map
    To keep track of neighbours of elements

memoryRSS : float
    To store the total amount of RSS memory consumed by the program

startTime : float
    To record the start time of the mining process

endTime : float
    To record the completion time of the mining process

itemsCnt : int
    To record the number of fuzzy spatial itemSets generated

mapItemsLowSum : map
    To keep track of low region values of items

mapItemsMidSum : map
    To keep track of middle region values of items

mapItemsHighSum : map
    To keep track of high region values of items

mapItemSum : map
    To keep track of the sum of fuzzy values of items

mapItemRegions : map
    To keep track of fuzzy regions of items

joinsCnt : int
    To keep track of the number of FFI-lists that were constructed

BufferSize : int
    Represents the size of the buffer

itemSetBuffer : list
    To keep track of items in the buffer

+
+
+
+
Methods:
+
+
mine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of frequent patterns will be loaded into an output file

+
+
getPatternsAsDataFrame()

Complete set of frequent patterns will be loaded into a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
convert(value)

To convert the given user specified value

+
+
FSFIMining( prefix, prefixLen, fsFim, minSup)

Method to generate FFI from a prefix

+
+
construct(px, py)

A function to construct Fuzzy itemSet from 2 fuzzy itemSets

+
+
Intersection(neighbourX,neighbourY)

Return common neighbours of 2 itemSet Neighbours

+
+
findElementWithTID(uList, tid)

To find element with same tid as given

+
+
WriteOut(prefix, prefixLen, item, sumIUtil,period)

To store the pattern

+
+
+
+
+
+

Executing the code on terminal:

+
Format:
+
+(.venv) $ python3 FFSPMiner_old.py <inputFile> <outputFile> <neighbours> <minSup> <sep>
+
+Example Usage:
+
+(.venv) $ python3  FFSPMiner_old.py sampleTDB.txt output.txt sampleN.txt 3
+
+(.venv) $ python3  FFSPMiner_old.py sampleTDB.txt output.txt sampleN.txt 0.3
+
+
+
+

Note

+

minSup will be considered in percentage of database transactions

+
+
+
+

Sample run of importing the code:

+
+

from PAMI.fuzzyGeoreferencedFrequentPattern import FFSPMiner as alg
+
+obj = alg.FFSPMiner("input.txt", "neighbours.txt", 2)
+
+obj.mine()
+
+fuzzySpatialFrequentPatterns = obj.getPatterns()
+
+print("Total number of fuzzy frequent spatial patterns:", len(fuzzySpatialFrequentPatterns))
+
+obj.save("outputFile")
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS:", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)

+
+
+
+

Credits:

+
+

The complete program was written by B.Sai Chitra under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS()[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS()[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns()[source]
+

Function to send the set of frequent patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame()[source]
+

Storing final frequent patterns in a dataframe

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime()[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine()[source]
+

Frequent pattern mining process will start from here

+
+ +
+
+printResults()[source]
+

This function is used to print the results

+
+ +
+
+save(outFile)[source]
+

Complete set of frequent patterns will be loaded into an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
+
+ +
+
+startMine()[source]
+

Frequent pattern mining process will start from here

+
+ +
+
+ +
+
+

PAMI.fuzzyGeoreferencedFrequentPattern.basic.abstract module

+
+
+

Module contents

+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.fuzzyGeoreferencedFrequentPattern.html b/sphinx/_build/html/PAMI.fuzzyGeoreferencedFrequentPattern.html
new file mode 100644
index 000000000..fe0c22198
--- /dev/null
+++ b/sphinx/_build/html/PAMI.fuzzyGeoreferencedFrequentPattern.html
@@ -0,0 +1,209 @@
+PAMI.fuzzyGeoreferencedFrequentPattern package — PAMI 2024.04.23 documentation
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.html b/sphinx/_build/html/PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.html
new file mode 100644
index 000000000..d3cb39b4b
--- /dev/null
+++ b/sphinx/_build/html/PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.html
@@ -0,0 +1,661 @@
+PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic package — PAMI 2024.04.23 documentation

PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic package

+
+

Submodules

+
+
+

PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner module

+
+
+class PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner.FGPFPMiner(iFile, nFile, minSup, maxPer, sep)[source]
+

Bases: _fuzzySpatialFrequentPatterns

+
+
Description:
+

Fuzzy Frequent Spatial Pattern Miner is designed to find all spatially frequent fuzzy patterns, +which is a non-trivial and challenging problem due to its huge search space. Efficient pruning +techniques are used to reduce the search space.

+
+
Reference:
+

+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine complete set of frequent patterns

  • +
  • oFile – str : +Name of the output file to store complete set of frequent patterns

  • +
  • minSup – int or float or str : +The user can specify minSup either in count or proportion of database size. If the program detects that the data type of minSup is integer, then it treats minSup as expressed in count. Otherwise, it will be treated as float.

  • +
  • maxPer – float : +The user can specify maxPer either in count or proportion of database size. If the program detects that the data type of maxPer is integer, then it treats maxPer as expressed in count.

  • +
  • nFile – str : +Name of the input neighbourhood file

  • +
  • FuzFile – str : +The user can specify fuzFile.

  • +
  • sep – str : +This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, users can override the default separator.

  • +
+
+
Attributes:
+
+
iFilefile

Name of the input file to mine complete set of fuzzy spatial frequent patterns

+
+
oFilefile

Name of the oFile file to store complete set of fuzzy spatial frequent patterns

+
+
minSupfloat

The user given minimum support

+
+
neighborsmap

keep track of neighbours of elements

+
+
memoryRSSfloat

To store the total amount of RSS memory consumed by the program

+
+
startTimefloat

To record the start time of the mining process

+
+
endTimefloat

To record the completion time of the mining process

+
+
itemsCntint

To record the number of fuzzy spatial itemSets generated

+
+
mapItemSummap

To keep track of sum of Fuzzy Values of items

+
+
joinsCntint

To keep track of the number of FFI-list that was constructed

+
+
BufferSizeint

represent the size of Buffer

+
+
itemSetBuffer list

to keep track of items in buffer

+
+
+
+
Methods:
+
+
mine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of frequent patterns will be loaded into an output file

+
+
getPatternsAsDataFrame()

Complete set of frequent patterns will be loaded into a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
convert(value)

To convert the given user specified value

+
+
FSFIMining( prefix, prefixLen, fsFim, minSup)

Method generate FFI from prefix

+
+
construct(px, py)

A function to construct Fuzzy itemSet from 2 fuzzy itemSets

+
+
Intersection(neighbourX,neighbourY)

Return common neighbours of 2 itemSet Neighbours

+
+
findElementWithTID(uList, tid)

To find element with same tid as given

+
+
WriteOut(prefix, prefixLen, item, sumIUtil,period)

To store the pattern

+
+
+
+
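The convert() entry above is what lets minSup and maxPer be supplied either as an absolute count or as a proportion of the database size. A minimal sketch of how such a conversion is commonly implemented in PAMI-style miners (an illustration under stated assumptions, not the verbatim library code; databaseSize is a hypothetical argument):

def convert(value, databaseSize):
    # int   -> already an absolute count
    # float -> proportion of the database size
    # str   -> parsed first, then interpreted the same way
    if isinstance(value, int):
        return value
    if isinstance(value, float):
        return databaseSize * value
    if isinstance(value, str):
        return databaseSize * float(value) if '.' in value else int(value)
    raise ValueError("unsupported threshold type")

For example, convert(0.3, 100) yields 30, matching the note below that fractional thresholds are read as a percentage of the database transactions.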
+
+

Executing the code on terminal :

+
Format:
+
+(.venv) $ python3 FGPFPMiner.py <inputFile> <outputFile> <neighbours> <minSup> <maxPer> <sep>
+
+Example Usage:
+
+(.venv) $ python3  FGPFPMiner.py sampleTDB.txt output.txt sampleN.txt 3 4
+
+
+
+

Note

+

minSup will be considered as a percentage of database transactions

+
+
+
+

Sample run of importing the code:

+
from PAMI.fuzzyGeoreferencedPeriodicFrequentPattern import FGPFPMiner as alg
+
+obj = alg.FGPFPMiner("input.txt", "neighbours.txt", 3, 4)
+
+obj.mine()
+
+print("Total number of fuzzy frequent spatial patterns:", len(obj.getPatterns()))
+
+obj.save("outputFile")
+
+print("Total Memory in USS:", obj.getMemoryUSS())
+
+print("Total Memory in RSS", obj.getMemoryRSS())
+
+print("Total ExecutionTime in seconds:", obj.getRuntime())
+
+
+
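Beyond the raw dict, getPatternsAsDataFrame() hands back a pandas DataFrame, which is convenient for inspection and export. A short, hedged usage sketch (the column names are not documented here, so inspect df.columns before relying on them):

df = obj.getPatternsAsDataFrame()
print(df.head())                         # peek at the first few patterns
df.to_csv("patterns.csv", index=False)   # export for downstream analysis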
+
+

Credits:

+
+

The complete program was written by B.Sai Chitra and Kundai Kwangwari under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS()[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS()[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns()[source]
+

Function to send the set of frequent patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame()[source]
+

Storing final frequent patterns in a dataframe

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime()[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine()[source]
+

Frequent pattern mining process will start from here

+
+ +
+
+printResults()[source]
+

This function is used to print the results

+
+ +
+
+save(outFile)[source]
+

Complete set of frequent patterns will be loaded into an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
+
+ +
+
+startMine()[source]
+

Frequent pattern mining process will start from here

+
+ +
+
+ +
+
+

PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner_old module

+
+
+class PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner_old.FGPFPMiner(iFile, nFile, FuzFile, minSup, maxPer, sep)[source]
+

Bases: _fuzzySpatialFrequentPatterns

+
+
Description:
+

Fuzzy Frequent Spatial Pattern Miner is designed to find all spatially frequent fuzzy patterns, +which is a non-trivial and challenging problem due to its huge search space. Efficient pruning +techniques are used to reduce the search space.

+
+
Reference:
+

+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine complete set of frequent patterns

  • +
  • oFile – str : +Name of the output file to store complete set of frequent patterns

  • +
  • minSup – int or float or str : +The user can specify minSup either in count or proportion of database size. If the program detects that the data type of minSup is integer, then it treats minSup as expressed in count. Otherwise, it will be treated as float.

  • +
  • maxPer – float : +The user can specify maxPer either in count or proportion of database size. If the program detects that the data type of maxPer is integer, then it treats maxPer as expressed in count.

  • +
  • nFile – str : +Name of the input neighbourhood file

  • +
  • FuzFile – str : +The user can specify fuzFile.

  • +
  • sep – str : +This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, users can override the default separator.

  • +
+
+
Attributes:
+
+
iFilefile

Name of the input file to mine complete set of fuzzy spatial frequent patterns

+
+
oFilefile

Name of the oFile file to store complete set of fuzzy spatial frequent patterns

+
+
minSupfloat

The user given minimum support

+
+
neighborsmap

keep track of neighbours of elements

+
+
memoryRSSfloat

To store the total amount of RSS memory consumed by the program

+
+
startTimefloat

To record the start time of the mining process

+
+
endTimefloat

To record the completion time of the mining process

+
+
itemsCntint

To record the number of fuzzy spatial itemSets generated

+
+
mapItemSummap

To keep track of sum of Fuzzy Values of items

+
+
mapItemRegionsmap

To Keep track of fuzzy regions of item

+
+
joinsCntint

To keep track of the number of FFI-list that was constructed

+
+
BufferSizeint

represent the size of Buffer

+
+
itemSetBuffer list

to keep track of items in buffer

+
+
+
+
Methods:
+
+
mine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of frequent patterns will be loaded into an output file

+
+
getPatternsAsDataFrame()

Complete set of frequent patterns will be loaded into a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
convert(value)

To convert the given user specified value

+
+
FSFIMining( prefix, prefixLen, fsFim, minSup)

Method generate FFI from prefix

+
+
construct(px, py)

A function to construct Fuzzy itemSet from 2 fuzzy itemSets

+
+
Intersection(neighbourX,neighbourY)

Return common neighbours of 2 itemSet Neighbours

+
+
findElementWithTID(uList, tid)

To find element with same tid as given

+
+
WriteOut(prefix, prefixLen, item, sumIUtil,period)

To store the pattern

+
+
+
+
+
+

Executing the code on terminal :

+
Format:
+
+(.venv) $ python3 FGPFPMiner_old.py <inputFile> <outputFile> <neighbours> <minSup> <maxPer> <sep>
+
+Example Usage:
+
+(.venv) $ python3  FGPFPMiner_old.py sampleTDB.txt output.txt sampleN.txt 3 4
+
+
+
+

Note

+

minSup will be considered as a percentage of database transactions

+
+
+
+

Sample run of importing the code:

+
+

from PAMI.fuzzyGeoreferencedPeriodicFrequentPattern import FGPFPMiner as alg

+

obj = alg.FGPFPMiner("input.txt", "neighbours.txt", 3, 4)

+

obj.mine()

+

print("Total number of fuzzy frequent spatial patterns:", len(obj.getPatterns()))

+

obj.save("outputFile")

+

print("Total Memory in USS:", obj.getMemoryUSS())

+

print("Total Memory in RSS", obj.getMemoryRSS())

+

print("Total ExecutionTime in seconds:", obj.getRuntime())

+
+
+
+

Credits:

+
+

The complete program was written by B.Sai Chitra and Kundai Kwangwari under the supervision of Professor Rage Uday Kiran.

+
+
+
+generateGraphs()[source]
+
+ +
+
+generateLatexCode(result)[source]
+
+ +
+
+getMemoryRSS()[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS()[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns()[source]
+

Function to send the set of frequent patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame()[source]
+

Storing final frequent patterns in a dataframe

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getPatternsAsDataframe()[source]
+
+
Returns:
+

returning periodic frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime()[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine()[source]
+

Frequent pattern mining process will start from here

+
+ +
+
+printResults()[source]
+

This function is used to print the results

+
+ +
+
+save(outFile)[source]
+

Complete set of frequent patterns will be loaded into an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
+
+ +
+
+startMine()[source]
+

Frequent pattern mining process will start from here

+
+ +
+
+ +
+
+

PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.abstract module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.html b/sphinx/_build/html/PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.html new file mode 100644 index 000000000..a1ba2c75b --- /dev/null +++ b/sphinx/_build/html/PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.html @@ -0,0 +1,212 @@ + + + + + + + PAMI.fuzzyGeoreferencedPeriodicFrequentPattern package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ + +
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.fuzzyPartialPeriodicPatterns.basic.html b/sphinx/_build/html/PAMI.fuzzyPartialPeriodicPatterns.basic.html new file mode 100644 index 000000000..78d59471b --- /dev/null +++ b/sphinx/_build/html/PAMI.fuzzyPartialPeriodicPatterns.basic.html @@ -0,0 +1,405 @@ + + + + + + + PAMI.fuzzyPartialPeriodicPatterns.basic package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.fuzzyPartialPeriodicPatterns.basic package

+
+

Submodules

+
+
+

PAMI.fuzzyPartialPeriodicPatterns.basic.F3PMiner module

+
+
+class PAMI.fuzzyPartialPeriodicPatterns.basic.F3PMiner.F3PMiner(iFile, minSup, sep='\t')[source]
+

Bases: _fuzzyPartialPeriodicPatterns

+
+
Description:
+

The F3PMiner algorithm discovers fuzzy partial periodic patterns in quantitative irregular multiple time series databases.

+
+
Reference:
+

+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine complete set of frequent patterns

  • +
  • oFile – str : +Name of the output file to store complete set of frequent patterns

  • +
  • minSup – int or float or str : +The user can specify minSup either in count or proportion of database size. If the program detects that the data type of minSup is integer, then it treats minSup as expressed in count. Otherwise, it will be treated as float.

  • +
  • maxPer – float : +The user can specify maxPer either in count or proportion of database size. If the program detects that the data type of maxPer is integer, then it treats maxPer as expressed in count.

  • +
  • sep – str : +This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, users can override the default separator.

  • +
+
+
Attributes:
+
+
iFilestring

Name of the input file to mine complete set of fuzzy spatial frequent patterns

+
+
oFilestring

Name of the oFile file to store complete set of fuzzy spatial frequent patterns

+
+
minSupfloat

The user given minimum support

+
+
memoryRSSfloat

To store the total amount of RSS memory consumed by the program

+
+
startTimefloat

To record the start time of the mining process

+
+
endTimefloat

To record the completion time of the mining process

+
+
itemsCntint

To record the number of fuzzy spatial itemSets generated

+
+
mapItemsGSummap

To keep track of G region values of items

+
+
mapItemsMidSum: map

To keep track of M region values of items

+
+
mapItemsHSum: map

To keep track of H region values of items

+
+
mapItemSum: map

To keep track of sum of Fuzzy Values of items

+
+
mapItemRegions: map

To Keep track of fuzzy regions of item

+
+
joinsCnt: int

To keep track of the number of ffi-list that was constructed

+
+
BufferSize: int

represent the size of Buffer

+
+
itemSetBuffer list

to keep track of items in buffer

+
+
+
+
Methods:
+
+
mine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of frequent patterns will be loaded into an output file

+
+
getPatternsAsDataFrame()

Complete set of frequent patterns will be loaded into a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
convert(value)

To convert the given user specified value

+
+
compareItems(o1, o2)

A Function that sort all ffi-list in ascending order of Support

+
+
F3PMining(prefix, prefixLen, FSFIM, minSup)

Method generate ffi from prefix

+
+
construct(px, py)

A function to construct Fuzzy itemSet from 2 fuzzy itemSets

+
+
findElementWithTID(uList, tid)

To find element with same tid as given

+
+
WriteOut(prefix, prefixLen, item, sumIUtil)

To store the pattern

+
+
+
+
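The mapItemsGSum, mapItemsMidSum, and mapItemsHSum attributes above imply that each observed quantity is first fuzzified into region membership values. A hedged illustration of one common scheme, triangular membership over three regions (the G/M/H breakpoints below are invented for the example and are not PAMI's actual fuzzification):

def fuzzify(quantity, low=1.0, mid=6.0, high=11.0):
    # Returns (G, M, H) membership degrees for a quantity.
    if quantity <= low:
        return (1.0, 0.0, 0.0)
    if quantity >= high:
        return (0.0, 0.0, 1.0)
    if quantity <= mid:
        m = (quantity - low) / (mid - low)
        return (1.0 - m, m, 0.0)
    m = (quantity - mid) / (high - mid)
    return (0.0, 1.0 - m, m)

print(fuzzify(4))   # (0.4, 0.6, 0.0): a mix of G and M membership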
+
+

Executing the code on terminal :

+
Format:
+
+(.venv) $ python3 F3PMiner.py <inputFile> <outputFile> <minSup> <separator>
+
+Example Usage:
+
+(.venv) $ python3  F3PMiner.py sampleTDB.txt output.txt 6
+
+
+
+

Note

+

minSup will be considered as a percentage of database transactions

+
+
+
+

Sample run of importing the code:

+
+

from PAMI.fuzzyPartialPeriodicPatterns import F3PMiner as alg

+

obj = alg.F3PMiner("input.txt", 2)

+

obj.mine()

+

fuzzyPartialPeriodicPatterns = obj.getPatterns()

+

print("Total number of Fuzzy Frequent Patterns:", len(fuzzyPartialPeriodicPatterns))

+

obj.save("outputFile")

+

memUSS = obj.getMemoryUSS()

+

print("Total Memory in USS:", memUSS)

+

memRSS = obj.getMemoryRSS()

+

print("Total Memory in RSS", memRSS)

+

run = obj.getRuntime()

+

print("Total ExecutionTime in seconds:", run)

+
+
+
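After save() has written the patterns, the output file can be read back for later analysis. A hedged sketch (the one-pattern-per-line layout with a ':' between pattern and value is an assumption about the saved format; adjust the delimiter if your file differs):

# Assumed layout: "<pattern>:<value>" per line in the saved file.
with open("outputFile") as f:
    for line in f:
        pattern, _, value = line.rstrip("\n").rpartition(":")
        print(pattern, value)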
+

Credits:

+
+

The complete program was written by PALLA Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS()[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS()[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns()[source]
+

Function to send the set of frequent patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame()[source]
+

Storing final frequent patterns in a dataframe

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime()[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine()[source]
+

Fuzzy frequent pattern mining process will start from here

+
+ +
+
+printResults()[source]
+

This function is used to print the results

+
+ +
+
+save(outFile)[source]
+

Complete set of frequent patterns will be loaded into an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
+
+ +
+
+startMine()[source]
+

Fuzzy frequent pattern mining process will start from here

+
+ +
+
+ +
+
+

PAMI.fuzzyPartialPeriodicPatterns.basic.abstract module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.fuzzyPartialPeriodicPatterns.html b/sphinx/_build/html/PAMI.fuzzyPartialPeriodicPatterns.html new file mode 100644 index 000000000..42c7663f5 --- /dev/null +++ b/sphinx/_build/html/PAMI.fuzzyPartialPeriodicPatterns.html @@ -0,0 +1,194 @@ + + + + + + + PAMI.fuzzyPartialPeriodicPatterns package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+ + +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.fuzzyPeriodicFrequentPattern.basic.html b/sphinx/_build/html/PAMI.fuzzyPeriodicFrequentPattern.basic.html new file mode 100644 index 000000000..97812c2d2 --- /dev/null +++ b/sphinx/_build/html/PAMI.fuzzyPeriodicFrequentPattern.basic.html @@ -0,0 +1,659 @@ + + + + + + + PAMI.fuzzyPeriodicFrequentPattern.basic package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.fuzzyPeriodicFrequentPattern.basic package

+
+

Submodules

+
+
+

PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner module

+
+
+class PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner.FPFPMiner(iFile: str | DataFrame, minSup: int | float, period: int | float, sep: str = '\t')[source]
+

Bases: _fuzzyPeriodicFrequentPatterns

+
+
Description:
+

Fuzzy Periodic Frequent Pattern Miner is designed to find all fuzzy periodic frequent patterns, which is a +non-trivial and challenging problem due to its huge search space. Efficient pruning +techniques are used to reduce the search space.

+
+
Reference:
+

R. U. Kiran et al., “Discovering Fuzzy Periodic-Frequent Patterns in Quantitative Temporal Databases,” +2020 IEEE International Conference on Fuzzy Systems (FUZZ-IEEE), Glasgow, UK, 2020, pp. +1-8, doi: 10.1109/FUZZ48607.2020.9177579.

+
+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine complete set of frequent patterns

  • +
  • oFile – str : +Name of the output file to store complete set of frequent patterns

  • +
  • minSup – int or float or str : +The user can specify minSup either in count or proportion of database size. If the program detects that the data type of minSup is integer, then it treats minSup as expressed in count. Otherwise, it will be treated as float.

  • +
  • maxPer – float : +The user can specify maxPer either in count or proportion of database size. If the program detects that the data type of maxPer is integer, then it treats maxPer as expressed in count.

  • +
  • sep – str : +This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, users can override the default separator.

  • +
+
+
Attributes:
+
+
iFilefile

Name of the input file to mine complete set of fuzzy spatial frequent patterns

+
+
oFilefile

Name of the oFile file to store complete set of fuzzy spatial frequent patterns

+
+
minSupfloat

The user given support

+
+
period: int

periodicity of an element

+
+
memoryRSSfloat

To store the total amount of RSS memory consumed by the program

+
+
startTime:float

To record the start time of the mining process

+
+
endTime:float

To record the completion time of the mining process

+
+
itemsCnt: int

To record the number of fuzzy spatial itemSets generated

+
+
mapItemsLowSum: map

To keep track of low region values of items

+
+
mapItemsMidSum: map

To keep track of middle region values of items

+
+
mapItemsHighSum: map

To keep track of high region values of items

+
+
mapItemSum: map

To keep track of sum of Fuzzy Values of items

+
+
mapItemRegions: map

To Keep track of fuzzy regions of item

+
+
jointCnt: int

To keep track of the number of FFI-list that was constructed

+
+
BufferSize: int

represent the size of Buffer

+
+
itemBuffer list

to keep track of items in buffer

+
+
maxTID: int

represent the maximum tid of the database

+
+
lastTIDs: map

represent the last tid of fuzzy items

+
+
itemsToRegion: map

represent items with respective regions

+
+
+
+
Methods:
+
+
mine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of frequent patterns will be loaded into an output file

+
+
getPatternsAsDataFrame()

Complete set of frequent patterns will be loaded into a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
convert(value):

To convert the given user specified value

+
+
FSFIMining( prefix, prefixLen, fsFim, minSup)

Method generate FFI from prefix

+
+
construct(px, py)

A function to construct Fuzzy itemSet from 2 fuzzy itemSets

+
+
findElementWithTID(UList, tid)

To find element with same tid as given

+
+
WriteOut(prefix, prefixLen, item, sumIUtil,period)

To store the pattern

+
+
+
+
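A fuzzy periodic-frequent pattern must clear two hurdles: support at least minSup, and no gap between consecutive occurrences (including the database boundaries) larger than maxPer. A minimal hedged sketch of that periodicity test over a list of transaction ids (an illustration of the definition, not the FFI-list machinery the miner actually uses):

def isPeriodic(tids, maxPer, lastTid):
    # True if every inter-occurrence gap, from tid 0 through the last
    # tid of the database, is at most maxPer.
    previous = 0
    for tid in sorted(tids):
        if tid - previous > maxPer:
            return False
        previous = tid
    return lastTid - previous <= maxPer

print(isPeriodic([1, 3, 5, 8], maxPer=3, lastTid=10))   # True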
+
+

Executing the code on terminal :

+
Format:
+
+(.venv) $ python3 FPFPMiner.py <inputFile> <outputFile> <minSup> <maxPer> <sep>
+
+Example Usage:
+
+(.venv) $ python3  FPFPMiner.py sampleTDB.txt output.txt 2 3
+
+
+
+

Note

+

minSup will be considered as a percentage of database transactions

+
+
+
+

Sample run of importing the code:

+
+

from PAMI.fuzzyPeriodicFrequentPattern.basic import FPFPMiner as alg

+

obj = alg.FPFPMiner("input.txt", 2, 3)

+

obj.mine()

+

periodicFrequentPatterns = obj.getPatterns()

+

print("Total number of Fuzzy Periodic Frequent Patterns:", len(periodicFrequentPatterns))

+

obj.save("output.txt")

+

memUSS = obj.getMemoryUSS()

+

print("Total Memory in USS:", memUSS)

+

memRSS = obj.getMemoryRSS()

+

print("Total Memory in RSS", memRSS)

+

run = obj.getRuntime()

+

print("Total ExecutionTime in seconds:", run)

+
+
+
+

Credits:

+
+

The complete program was written by Sai Chitra.B under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns() Dict[str, str][source]
+

Function to send the set of frequent patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame() DataFrame[source]
+

Storing final frequent patterns in a dataframe

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine() None[source]
+

Fuzzy periodic-frequent pattern mining process will start from here

+
+ +
+
+printResults() None[source]
+

This function is used to print the results

+
+ +
+
+save(outFile: str) None[source]
+

Complete set of frequent patterns will be loaded into an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
Returns:
+

None

+
+
+
+ +
+
+startMine() None[source]
+

Fuzzy periodic-frequent pattern mining process will start from here

+
+ +
+
+ +
+
+

PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner_old module

+
+
+class PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner_old.FPFPMiner(iFile, minSup, period, sep='\t')[source]
+

Bases: _fuzzyPeriodicFrequentPatterns

+
+
Description:
+

Fuzzy Periodic Frequent Pattern Miner is designed to find all fuzzy periodic frequent patterns, which is a +non-trivial and challenging problem due to its huge search space. Efficient pruning +techniques are used to reduce the search space.

+
+
Reference:
+

+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine complete set of frequent patterns

  • +
  • oFile – str : +Name of the output file to store complete set of frequent patterns

  • +
  • minSup – int or float or str : +The user can specify minSup either in count or proportion of database size. If the program detects that the data type of minSup is integer, then it treats minSup as expressed in count. Otherwise, it will be treated as float.

  • +
  • maxPer – float : +The user can specify maxPer either in count or proportion of database size. If the program detects that the data type of maxPer is integer, then it treats maxPer as expressed in count.

  • +
  • sep – str : +This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, users can override the default separator.

  • +
+
+
Attributes:
+
+
iFilefile

Name of the input file to mine complete set of fuzzy spatial frequent patterns

+
+
oFilefile

Name of the oFile file to store complete set of fuzzy spatial frequent patterns

+
+
minSupfloat

The user given support

+
+
periodint

periodicity of an element

+
+
memoryRSSfloat

To store the total amount of RSS memory consumed by the program

+
+
startTimefloat

To record the start time of the mining process

+
+
endTimefloat

To record the completion time of the mining process

+
+
itemsCntint

To record the number of fuzzy spatial itemSets generated

+
+
mapItemsLowSummap

To keep track of low region values of items

+
+
mapItemsMidSummap

To keep track of middle region values of items

+
+
mapItemsHighSummap

To keep track of high region values of items

+
+
mapItemSummap

To keep track of sum of Fuzzy Values of items

+
+
mapItemRegionsmap

To Keep track of fuzzy regions of item

+
+
joinsCntint

To keep track of the number of FFI-list that was constructed

+
+
BufferSizeint

represent the size of Buffer

+
+
itemSetBuffer list

to keep track of items in buffer

+
+
maxTIDint

represent the maximum tid of the database

+
+
lastTIDsmap

represent the last tid of fuzzy items

+
+
itemsToRegionmap

represent items with respective regions

+
+
+
+
Methods:
+
+
mine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of frequent patterns will be loaded into an output file

+
+
getPatternsAsDataFrame()

Complete set of frequent patterns will be loaded into a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
convert(value)

To convert the given user specified value

+
+
FSFIMining( prefix, prefixLen, fsFim, minSup)

Method generate FFI from prefix

+
+
construct(px, py)

A function to construct Fuzzy itemSet from 2 fuzzy itemSets

+
+
findElementWithTID(UList, tid)

To find element with same tid as given

+
+
WriteOut(prefix, prefixLen, item, sumIUtil,period)

To store the pattern

+
+
+
+
+
+

Executing the code on terminal :

+
Format:
+
+(.venv) $ python3 FPFPMiner_old.py <inputFile> <outputFile> <minSup> <maxPer> <sep>
+
+Example Usage:
+
+(.venv) $ python3  FPFPMiner_old.py sampleTDB.txt output.txt 2 3
+
+
+
+

Note

+

minSup will be considered as a percentage of database transactions

+
+
+
+

Sample run of importing the code:

+
+

from PAMI.fuzzyPeriodicFrequentPattern.basic import FPFPMiner as alg

+

obj = alg.FPFPMiner("input.txt", 2, 3)

+

obj.mine()

+

periodicFrequentPatterns = obj.getPatterns()

+

print("Total number of Fuzzy Periodic Frequent Patterns:", len(periodicFrequentPatterns))

+

obj.save("output.txt")

+

memUSS = obj.getMemoryUSS()

+

print("Total Memory in USS:", memUSS)

+

memRSS = obj.getMemoryRSS()

+

print("Total Memory in RSS", memRSS)

+

run = obj.getRuntime()

+

print("Total ExecutionTime in seconds:", run)

+
+
+
+

Credits:

+
+

The complete program was written by Sai Chitra.B under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS()[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS()[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns()[source]
+

Function to send the set of frequent patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame()[source]
+

Storing final frequent patterns in a dataframe

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime()[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine()[source]
+

Fuzzy periodic-frequent pattern mining process will start from here

+
+ +
+
+printResults()[source]
+

This function is used to print the results

+
+ +
+
+save(outFile)[source]
+

Complete set of frequent patterns will be loaded into an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
+
+ +
+
+startMine()[source]
+

Fuzzy periodic-frequent pattern mining process will start from here

+
+ +
+
+ +
+
+

PAMI.fuzzyPeriodicFrequentPattern.basic.abstract module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.fuzzyPeriodicFrequentPattern.html b/sphinx/_build/html/PAMI.fuzzyPeriodicFrequentPattern.html new file mode 100644 index 000000000..220278322 --- /dev/null +++ b/sphinx/_build/html/PAMI.fuzzyPeriodicFrequentPattern.html @@ -0,0 +1,209 @@ + + + + + + + PAMI.fuzzyPeriodicFrequentPattern package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ + +
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.geoReferencedPeriodicFrequentPattern.basic.html b/sphinx/_build/html/PAMI.geoReferencedPeriodicFrequentPattern.basic.html new file mode 100644 index 000000000..7a5750379 --- /dev/null +++ b/sphinx/_build/html/PAMI.geoReferencedPeriodicFrequentPattern.basic.html @@ -0,0 +1,420 @@ + + + + + + + PAMI.geoReferencedPeriodicFrequentPattern.basic package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.geoReferencedPeriodicFrequentPattern.basic package

+
+

Submodules

+
+
+

PAMI.geoReferencedPeriodicFrequentPattern.basic.GPFPMiner module

+
+
+class PAMI.geoReferencedPeriodicFrequentPattern.basic.GPFPMiner.GPFPMiner(iFile, nFile, minSup, maxPer, sep='\t')[source]
+

Bases: _geoReferencedPeriodicFrequentPatterns

+
+
Description:
+

GPFPMiner is an extension of the ECLAT algorithm, which stands for Equivalence Class Clustering and +bottom-up Lattice Traversal, used to mine geo-referenced periodic-frequent patterns.

+
+
Reference:
+

+
Parameters:
+
    +
  • iFile – str +Name of the Input file to mine complete set of Geo-referenced periodic frequent patterns

  • +
  • oFile – str +Name of the output file to store complete set of Geo-referenced periodic frequent patterns

  • +
  • minSup – int or float or str +The user can specify minSup either in count or proportion of database size. If the program detects that the data type of minSup is integer, then it treats minSup as expressed in count. Otherwise, it will be treated as float.

  • +
  • maxPer – float +The user can specify maxPer either in count or proportion of database size. If the program detects that the data type of maxPer is integer, then it treats maxPer as expressed in count.

  • +
  • nFile – str +Name of the input neighbourhood file

  • +
  • sep – str +This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, users can override the default separator.

  • +
+
+
Attributes:
+
+
iFilestr

Input file name or path of the input file

+
+
nFilestr

Name of Neighbourhood file name

+
+
minSupfloat or int or str

The user can specify minSup either in count or proportion of database size. +If the program detects that the data type of minSup is integer, then it treats minSup as expressed in count. +Otherwise, it will be treated as float. +Example: minSup=10 will be treated as integer, while minSup=10.0 will be treated as float

+
+
maxPerfloat or int or str

The user can specify maxPer either in count or proportion of database size. +If the program detects that the data type of maxPer is integer, then it treats maxPer as expressed in count. +Otherwise, it will be treated as float. +Example: maxPer=10 will be treated as integer, while maxPer=10.0 will be treated as float

+
+
sepstr

This variable is used to distinguish items from one another in a transaction. The default separator is tab space. +However, users can override the default separator.

+
+
startTimefloat

To record the start time of the mining process

+
+
endTimefloat

To record the completion time of the mining process

+
+
finalPatternsdict

Storing the complete set of patterns in a dictionary variable

+
+
oFilestr

Name of the output file to store complete set of frequent patterns

+
+
memoryUSSfloat

To store the total amount of USS memory consumed by the program

+
+
memoryRSSfloat

To store the total amount of RSS memory consumed by the program

+
+
Databaselist

To store the complete set of transactions available in the input database/file

+
+
+
+
Methods:
+
+
mine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of frequent patterns will be loaded into an output file

+
+
getPatternsAsDataFrame()

Complete set of frequent patterns will be loaded into a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
creatingItemSets(iFileName)

Storing the complete transactions of the database/input file in a database variable

+
+
frequentOneItem()

Generating one frequent patterns

+
+
convert(value)

To convert the given user specified value

+
+
getNeighbourItems(keySet)

A function to get common neighbours of a itemSet

+
+
mapNeighbours(file)

A function to map items to their neighbours

+
+
+
+
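mapNeighbours(file) loads the nFile that drives the spatial constraint. A hedged sketch of parsing such a neighbourhood file (the assumed layout, one line per item with the item first and its neighbours after it, separated by sep, is consistent with the sampleN.txt usage below but is not a guaranteed format):

def mapNeighbours(nFile, sep='\t'):
    # Builds {item: set(neighbours)} from the neighbourhood file.
    neighbours = {}
    with open(nFile) as f:
        for line in f:
            tokens = line.strip().split(sep)
            if tokens and tokens[0]:
                neighbours[tokens[0]] = set(tokens[1:])
    return neighbours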
+
+

Executing the code on terminal :

+
Format:
+
+(.venv) $ python3 GPFPMiner.py <inputFile> <outputFile> <neighbourFile> <minSup> <maxPer>
+
+Example Usage:
+
+(.venv) $ python3 GPFPMiner.py sampleTDB.txt output.txt sampleN.txt 0.5 0.3
+
+
+
+

Note

+

minSup & maxPer will be considered as a percentage of database transactions

+
+
+
+

Sample run of importing the code :

+
import PAMI.geoReferencedPeriodicFrequentPattern.GPFPMiner as alg
+
+obj = alg.GPFPMiner("sampleTDB.txt", "sampleN.txt", 5, 3)
+
+obj.mine()
+
+Patterns = obj.getPatterns()
+
+print("Total number of Geo Referenced Periodic-Frequent Patterns:", len(Patterns))
+
+obj.save("outFile")
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+

Credits:

+
+

The complete program was written by P.RaviKumar under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS()[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+ +
+
+getMemoryUSS()[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns()[source]
+

Function to send the set of frequent patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame()[source]
+

Storing final frequent patterns in a dataframe

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime()[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mapNeighbours()[source]
+

A function to map items to their Neighbours

+
+ +
+
+mine()[source]
+

Frequent pattern mining process will start from here

+
+ +
+
+printResults()[source]
+

This function is used to print the results

+
+ +
+
+save(outFile)[source]
+

Complete set of frequent patterns will be loaded into an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
+
+ +
+
+startMine()[source]
+

Frequent pattern mining process will start from here

+
+ +
+
+ +
+
+

PAMI.geoReferencedPeriodicFrequentPattern.basic.abstract module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.geoReferencedPeriodicFrequentPattern.html b/sphinx/_build/html/PAMI.geoReferencedPeriodicFrequentPattern.html new file mode 100644 index 000000000..5411b7402 --- /dev/null +++ b/sphinx/_build/html/PAMI.geoReferencedPeriodicFrequentPattern.html @@ -0,0 +1,195 @@ + + + + + + + PAMI.geoReferencedPeriodicFrequentPattern package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ + +
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.georeferencedFrequentPattern.basic.html b/sphinx/_build/html/PAMI.georeferencedFrequentPattern.basic.html new file mode 100644 index 000000000..23ed9bd96 --- /dev/null +++ b/sphinx/_build/html/PAMI.georeferencedFrequentPattern.basic.html @@ -0,0 +1,422 @@ + + + + + + + PAMI.georeferencedFrequentPattern.basic package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.georeferencedFrequentPattern.basic package

+
+

Submodules

+
+
+

PAMI.georeferencedFrequentPattern.basic.FSPGrowth module

+
+
+

PAMI.georeferencedFrequentPattern.basic.SpatialECLAT module

+
+
+class PAMI.georeferencedFrequentPattern.basic.SpatialECLAT.SpatialECLAT(iFile, nFile, minSup, sep='\t')[source]
+

Bases: _spatialFrequentPatterns

+
+
Description:
+

Spatial ECLAT is an extension of the ECLAT algorithm, which stands for Equivalence Class Clustering and bottom-up +Lattice Traversal. It is one of the popular methods of association rule mining, and a more efficient and +scalable version of the Apriori algorithm.

+
+
Reference:
+

Rage, Uday & Fournier Viger, Philippe & Zettsu, Koji & Toyoda, Masashi & Kitsuregawa, Masaru. (2020). +Discovering Frequent Spatial Patterns in Very Large Spatiotemporal Databases.

+
+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine complete set of Geo-referenced frequent patterns

  • +
  • oFile – str : +Name of the output file to store complete set of Geo-referenced frequent patterns

  • +
  • minSup – int or float or str : +The user can specify minSup either in count or proportion of database size. If the program detects that the data type of minSup is integer, then it treats minSup as expressed in count. Otherwise, it will be treated as float.

  • +
  • maxPer – float : +The user can specify maxPer either in count or proportion of database size. If the program detects that the data type of maxPer is integer, then it treats maxPer as expressed in count.

  • +
  • nFile – str : +Name of the input neighbourhood file

  • +
  • sep – str : +This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, users can override the default separator.

  • +
+
+
Attributes:
+
+
iFilestr

Input file name or path of the input file

+
+
nFilestr

Name of Neighbourhood file name

+
+
minSupint or float or str

The user can specify minSup either in count or proportion of database size. +If the program detects that the data type of minSup is integer, then it treats minSup as expressed in count. +Otherwise, it will be treated as float. +Example: minSup=10 will be treated as integer, while minSup=10.0 will be treated as float

+
+
startTimefloat

To record the start time of the mining process

+
+
endTimefloat

To record the completion time of the mining process

+
+
finalPatternsdict

Storing the complete set of patterns in a dictionary variable

+
+
oFilestr

Name of the output file to store complete set of frequent patterns

+
+
memoryUSSfloat

To store the total amount of USS memory consumed by the program

+
+
memoryRSSfloat

To store the total amount of RSS memory consumed by the program

+
+
Databaselist

To store the complete set of transactions available in the input database/file

+
+
+
+
Methods:
+
+
mine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of frequent patterns will be loaded into an output file

+
+
getPatternsAsDataFrame()

Complete set of frequent patterns will be loaded into a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
creatingItemSets(iFileName)

Storing the complete transactions of the database/input file in a database variable

+
+
frequentOneItem()

Generating one frequent patterns

+
+
dictKeysToInt(iList)

Converting dictionary keys to integer elements

+
+
eclatGeneration(cList)

It will generate the combinations of frequent items

+
+
generateSpatialFrequentPatterns(tidList)

It will generate the combinations of frequent items from a list of items

+
+
convert(value)

To convert the given user specified value

+
+
getNeighbourItems(keySet)

A function to get common neighbours of a itemSet

+
+
mapNeighbours(file)

A function to map items to their neighbours

+
+
+
+
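Eclat-style mining represents each item by its tid-list; a candidate itemset's support is the size of the intersection of its members' tid-lists, and the spatial variant additionally requires joined items to be neighbours. A hedged sketch of that core join (illustrative only; the library's eclatGeneration and generateSpatialFrequentPatterns differ in detail):

def spatialJoin(tidListX, tidListY, neighboursOfX, itemY):
    # Join is allowed only when itemY is a neighbour of itemX; the
    # pair's support is the size of the tid-list intersection.
    if itemY not in neighboursOfX:
        return None                    # violates the spatial constraint
    return tidListX & tidListY         # tid-lists kept as Python sets

pairTids = spatialJoin({1, 2, 5, 9}, {2, 5, 7}, {"b", "c"}, "b")
print(len(pairTids))                   # 2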
+
+

Executing the code on terminal :

+
Format:
+
+(.venv) $ python3 SpatialECLAT.py <inputFile> <outputFile> <neighbourFile> <minSup>
+
+Example Usage:
+
+(.venv) $ python3 SpatialECLAT.py sampleTDB.txt output.txt sampleN.txt 0.5
+
+
+
+

Note

+

minSup will be considered as a percentage of database transactions

+
+
+
+

Sample run of importing the code :

+
from PAMI.georeferencedFrequentPattern.basic import SpatialECLAT as alg
+
+obj = alg.SpatialECLAT("sampleTDB.txt", "sampleN.txt", 5)
+
+obj.mine()
+
+spatialFrequentPatterns = obj.getPatterns()
+
+print("Total number of Spatial Frequent Patterns:", len(spatialFrequentPatterns))
+
+obj.save("outFile")
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+

Credits:

+
+

The complete program was written by B.Sai Chitra under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS()[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS()[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns()[source]
+

Function to send the set of frequent patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame()[source]
+

Storing final frequent patterns in a dataframe

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime()[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine()[source]
+

Frequent pattern mining process will start from here

+
+ +
+
+printResults()[source]
+

This function is used to print the results

+
+ +
+
+save(outFile)[source]
+

Complete set of frequent patterns will be loaded into an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
+
+ +
+
+startMine()[source]
+

Frequent pattern mining process will start from here

+
+ +
+
+ +
+
+

PAMI.georeferencedFrequentPattern.basic.abstract module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.georeferencedFrequentPattern.html b/sphinx/_build/html/PAMI.georeferencedFrequentPattern.html new file mode 100644 index 000000000..3fec36140 --- /dev/null +++ b/sphinx/_build/html/PAMI.georeferencedFrequentPattern.html @@ -0,0 +1,195 @@ + + + + + + + PAMI.georeferencedFrequentPattern package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ + +
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.georeferencedFrequentSequencePattern.html b/sphinx/_build/html/PAMI.georeferencedFrequentSequencePattern.html new file mode 100644 index 000000000..2da2e8666 --- /dev/null +++ b/sphinx/_build/html/PAMI.georeferencedFrequentSequencePattern.html @@ -0,0 +1,172 @@ + + + + + + + PAMI.georeferencedFrequentSequencePattern package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.georeferencedFrequentSequencePattern package

+
+

Submodules

+
+
+

PAMI.georeferencedFrequentSequencePattern.abstract module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.georeferencedPartialPeriodicPattern.basic.html b/sphinx/_build/html/PAMI.georeferencedPartialPeriodicPattern.basic.html new file mode 100644 index 000000000..18a91126a --- /dev/null +++ b/sphinx/_build/html/PAMI.georeferencedPartialPeriodicPattern.basic.html @@ -0,0 +1,428 @@ + + + + + + + PAMI.georeferencedPartialPeriodicPattern.basic package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.georeferencedPartialPeriodicPattern.basic package

+
+

Submodules

+
+
+

PAMI.georeferencedPartialPeriodicPattern.basic.STEclat module

+
+
+class PAMI.georeferencedPartialPeriodicPattern.basic.STEclat.STEclat(iFile, nFile, minPS, maxIAT, sep='\t')[source]
+

Bases: _partialPeriodicSpatialPatterns

+
+
Description:
+

STEclat is one of the fundamental algorithms to discover geo-referenced partial periodic-frequent patterns in a transactional database.

+
+
Reference:
+

R. Uday Kiran, C. Saideep, K. Zettsu, M. Toyoda, M. Kitsuregawa and P. Krishna Reddy, +"Discovering Partial Periodic Spatial Patterns in Spatiotemporal Databases," 2019 IEEE International +Conference on Big Data (Big Data), 2019, pp. 233-238, doi: 10.1109/BigData47090.2019.9005693.

+
+
+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine complete set of Geo-referenced Partial Periodic patterns

  • +
  • oFile – str : +Name of the output file to store complete set of Geo-referenced Partial Periodic patterns

  • +
  • minPS – int or float or str : +The user can specify minPS either in count or proportion of database size. If the program detects that the data type of minPS is integer, then it treats minPS as expressed in count. Otherwise, it will be treated as float.

  • +
  • maxIAT – int or float or str : +The user can specify maxIAT either in count or proportion of database size. If the program detects that the data type of maxIAT is integer, then it treats maxIAT as expressed in count. Otherwise, it will be treated as float.

  • +
  • nFile – str : +Name of the input neighbourhood file

  • +
  • sep – str : +This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, users can override the default separator.

  • +
+
+
Attributes:
+
+
iFilestr

Input file name or path of the input file

+
+
nFilestr

Name of Neighbourhood file name

+
+
maxIATfloat or int or str

The user can specify maxIAT either in count or proportion of database size. +If the program detects that the data type of maxIAT is integer, then it treats maxIAT as expressed in count. +Otherwise, it will be treated as float. +Example: maxIAT=10 will be treated as integer, while maxIAT=10.0 will be treated as float

+
+
minPSfloat or int or str

The user can specify minPS either in count or proportion of database size. +If the program detects that the data type of minPS is integer, then it treats minPS as expressed in count. +Otherwise, it will be treated as float. +Example: minPS=10 will be treated as integer, while minPS=10.0 will be treated as float

+
+
sepstr

This variable is used to distinguish items from one another in a transaction. The default separator is tab space. +However, users can override the default separator.

+
+
startTimefloat

To record the start time of the mining process

+
+
endTimefloat

To record the completion time of the mining process

+
+
finalPatternsdict

Storing the complete set of patterns in a dictionary variable

+
+
oFilestr

Name of the output file to store complete set of frequent patterns

+
+
memoryUSSfloat

To store the total amount of USS memory consumed by the program

+
+
memoryRSSfloat

To store the total amount of RSS memory consumed by the program

+
+
Databaselist

To store the complete set of transactions available in the input database/file

+
+
+
+
Methods:
+
+
mine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of frequent patterns will be loaded into an output file

+
+
getPatternsAsDataFrame()

Complete set of frequent patterns will be loaded into a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
creatingItemSets(iFileName)

Storing the complete transactions of the database/input file in a database variable

+
+
frequentOneItem()

Generating one frequent patterns

+
+
convert(value):

To convert the given user specified value

+
+
getNeighbourItems(keySet)

A function to get common neighbours of a itemSet

+
+
mapNeighbours(file)

A function to map items to their neighbours

+
+
+
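Here a pattern is evaluated by its periodic-support: the number of consecutive occurrences whose inter-arrival time is at most maxIAT, with patterns reported when that count reaches minPS. A hedged sketch of the measure over a tid-list (an illustration of the definition from the cited paper, not STEclat's internal code):

def periodicSupport(tids, maxIAT):
    # Counts consecutive-occurrence gaps no larger than maxIAT.
    tids = sorted(tids)
    return sum(1 for a, b in zip(tids, tids[1:]) if b - a <= maxIAT)

print(periodicSupport([1, 2, 4, 9, 10], maxIAT=2))   # 3 qualifying gaps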
+
+
+

Executing the code on terminal :

+
Format:
+
+(.venv) $ python3 STEclat.py <inputFile> <outputFile> <neighbourFile>  <minPS>  <maxIAT>
+
+Example Usage:
+
+(.venv) $ python3 STEclat.py sampleTDB.txt output.txt sampleN.txt 0.2 0.5
+
+
+
+

Note

+

maxIAT & minPS will be considered as a percentage of database transactions

+
+
+
+

Sample run of importing the code :

+
import PAMI.georeferencedPartialPeriodicPattern.STEclat as alg
+
+obj = alg.STEclat("sampleTDB.txt", "sampleN.txt", 3, 4)
+
+obj.mine()
+
+partialPeriodicSpatialPatterns = obj.getPatterns()
+
+print("Total number of Periodic Spatial Frequent Patterns:", len(partialPeriodicSpatialPatterns))
+
+obj.save("outFile")
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+

Credits:

+
+

The complete program was written by P. Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS()[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS()[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns()[source]
+

Function to send the set of frequent patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame()[source]
+

Storing final frequent patterns in a dataframe

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime()[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mapNeighbours()[source]
+

A function to map items to their Neighbours

+
+ +
+
+mine()[source]
+

Frequent pattern mining process will start from here

+
+ +
+
+printResults()[source]
+

This function is used to print the results

+
+ +
+
+save(outFile)[source]
+

Complete set of frequent patterns will be loaded into an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
+
+ +
+
+startMine()[source]
+

Frequent pattern mining process will start from here

+
+ +
+
+ +
+
+

PAMI.georeferencedPartialPeriodicPattern.basic.abstract module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.georeferencedPartialPeriodicPattern.html b/sphinx/_build/html/PAMI.georeferencedPartialPeriodicPattern.html
new file mode 100644
index 000000000..9bb473d92
--- /dev/null
+++ b/sphinx/_build/html/PAMI.georeferencedPartialPeriodicPattern.html
@@ -0,0 +1,195 @@
+PAMI.georeferencedPartialPeriodicPattern package — PAMI 2024.04.23 documentation
+ + +
+ +
+
+
+ +
+
+ + +
+
+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.highUtilityFrequentPattern.basic.html b/sphinx/_build/html/PAMI.highUtilityFrequentPattern.basic.html
new file mode 100644
index 000000000..80a2c03f3
--- /dev/null
+++ b/sphinx/_build/html/PAMI.highUtilityFrequentPattern.basic.html
@@ -0,0 +1,452 @@
+PAMI.highUtilityFrequentPattern.basic package — PAMI 2024.04.23 documentation
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.highUtilityFrequentPattern.basic package

+
+

Submodules

+
+
+

PAMI.highUtilityFrequentPattern.basic.HUFIM module

+
+
+class PAMI.highUtilityFrequentPattern.basic.HUFIM.HUFIM(iFile: str, minUtil: int | float, minSup: int | float, sep: str = '\t')[source]
+

Bases: _utilityPatterns

+
+
Description:
+

HUFIM (High Utility Frequent Itemset Miner) algorithm helps us to mine High Utility Frequent ItemSets (HUFIs) from transactional databases.

+
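As a minimal illustration of this definition (a sketch, not HUFIM's actual code), an itemset qualifies as a HUFI only when it clears both user-given thresholds:

def isHUFI(support, utility, minSup, minUtil):
    # a High Utility Frequent Itemset must be both frequent and high utility
    return support >= minSup and utility >= minUtil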
+
Reference:
+

Kiran, R.U., Reddy, T.Y., Fournier-Viger, P., Toyoda, M., Reddy, P.K., & Kitsuregawa, M. (2019). +Efficiently Finding High Utility-Frequent Itemsets Using Cutoff and Suffix Utility. PAKDD 2019. +DOI: 10.1007/978-3-030-16145-3_15

+
+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine the complete set of High Utility Frequent patterns

  • +
  • oFile – str : +Name of the output file to store the complete set of High Utility Frequent patterns

  • +
  • minSup – int or float or str : +The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup is expressed in count. Otherwise, it will be treated as float.

  • +
  • minUtil – int : +The user given minUtil value.

  • +
  • candidateCount – int +Number of candidates

  • +
  • maxMemory – int +Maximum memory used by this program for running

  • +
  • nFile – str : +Name of the input file to mine complete set of Geo-referenced frequent sequence patterns

  • +
  • sep – str : +This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override their default separator.

  • +
+
+
Attributes:
+
+
iFile : file

Name of the input file to mine complete set of patterns

+
+
oFile : file

Name of the output file to store complete set of patterns

+
+
memoryRSS : float

To store the total amount of RSS memory consumed by the program

+
+
startTime:float

To record the start time of the mining process

+
+
endTime:float

To record the completion time of the mining process

+
+
minUtil : int

The user given minUtil value

+
+
minSup : float

The user given minSup value

+
+
highUtilityFrequentItemSets: map

set of high utility frequent itemSets

+
+
candidateCount: int

Number of candidates

+
+
utilityBinArrayLU: list

A map to hold the local utility values of the items in database

+
+
utilityBinArraySU: list

A map to hold the subtree utility values of the items in the database

+
+
oldNamesToNewNames: list

A map which contains old names, new names of items as key value pairs

+
+
newNamesToOldNames: list

A map which contains new names, old names of items as key value pairs

+
+
singleItemSetsSupport: map

A map which maps from single itemsets (items) to their support

+
+
singleItemSetsUtility: map

A map which maps from single itemsets (items) to their utilities

+
+
maxMemory: float

Maximum memory used by this program for running

+
+
patternCount: int

Number of HUFIs

+
+
itemsToKeep: list

keep only the promising items, i.e., items that can extend other items to form HUFIs

+
+
itemsToExplore: list

list of items that need to be explored

+
+
+
+
Methods:
+
+
mine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of patterns will be loaded into an output file

+
+
getPatternsAsDataFrame()

Complete set of patterns will be loaded into a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
backTrackingHUFIM(transactionsOfP, itemsToKeep, itemsToExplore, prefixLength)

A method to mine the HUFIs recursively

+
+
useUtilityBinArraysToCalculateUpperBounds(transactionsPe, j, itemsToKeep)

A method to calculate the sub-tree utility and local utility of all items that can extend itemSet P and e

+
+
output(tempPosition, utility)

A method to output a high-utility frequent itemSet to file or memory depending on what the user chose

+
+
isEqual(transaction1, transaction2)

A method to check if two transactions are identical

+
+
useUtilityBinArrayToCalculateSubtreeUtilityFirstTime(dataset)

A method to calculate the subtree utility values for single items

+
+
sortDatabase(self, transactions)

A method to sort transactions

+
+
sortTransaction(self, trans1, trans2)

A method to sort transactions

+
+
useUtilityBinArrayToCalculateLocalUtilityFirstTime(self, dataset)

A method to calculate local utility values for single itemSets

+
+
+
+
+
+

Executing the code on terminal

+
Format:
+
+(.venv) $ python3 HUFIM.py <inputFile> <outputFile> <minUtil> <minSup> <sep>
+
+Example Usage:
+
+(.venv) $ python3 HUFIM.py sampleTDB.txt output.txt 35 20
+
+
+
+

Note

+

minSup will be considered as a percentage of the database transactions

+
+
+
+

Sample run of importing the code

+
from PAMI.highUtilityFrequentPattern.basic import HUFIM as alg
+
+obj=alg.HUFIM("input.txt", 35, 20)
+
+obj.mine()
+
+Patterns = obj.getPatterns()
+
+print("Total number of high utility frequent Patterns:", len(Patterns))
+
+obj.save("output")
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+

Credits:

+
+

The complete program was written by Pradeep Pallikila under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns() Dict[str, List[int | float]][source]
+

Function to send the set of patterns after completion of the mining process

+
+
Returns:
+

returning patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame() DataFrame[source]
+

Storing final patterns in a dataframe

+
+
Returns:
+

returning patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine() None[source]
+

High Utility Frequent Pattern mining starts here

+
+
Returns:
+

None

+
+
+
+ +
+
+printResults() None[source]
+

This function is used to print the results

+
+ +
+
+save(outFile: str) None[source]
+

Complete set of frequent patterns will be loaded into an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
Returns:
+

None

+
+
+
+ +
+
+startMine() None[source]
+

High Utility Frequent Pattern mining starts here

+
+
Returns:
+

None

+
+
+
+ +
+
+ +
+
+

PAMI.highUtilityFrequentPattern.basic.abstract module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.highUtilityFrequentPattern.html b/sphinx/_build/html/PAMI.highUtilityFrequentPattern.html
new file mode 100644
index 000000000..adf8a4dcd
--- /dev/null
+++ b/sphinx/_build/html/PAMI.highUtilityFrequentPattern.html
@@ -0,0 +1,194 @@
+PAMI.highUtilityFrequentPattern package — PAMI 2024.04.23 documentation
+ + +
+ +
+
+
+ +
+
+ + +
+
+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.highUtilityGeoreferencedFrequentPattern.basic.html b/sphinx/_build/html/PAMI.highUtilityGeoreferencedFrequentPattern.basic.html
new file mode 100644
index 000000000..4af175b06
--- /dev/null
+++ b/sphinx/_build/html/PAMI.highUtilityGeoreferencedFrequentPattern.basic.html
@@ -0,0 +1,440 @@
+PAMI.highUtilityGeoreferencedFrequentPattern.basic package — PAMI 2024.04.23 documentation
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.highUtilityGeoreferencedFrequentPattern.basic package

+
+

Submodules

+
+
+

PAMI.highUtilityGeoreferencedFrequentPattern.basic.SHUFIM module

+
+
+class PAMI.highUtilityGeoreferencedFrequentPattern.basic.SHUFIM.SHUFIM(iFile, nFile, minUtil, minSup, sep='\t')[source]
+

Bases: _utilityPatterns

+
+
Description:
+

Spatial High Utility Frequent ItemSet Mining (SHUFIM) aims to discover all itemSets in a spatioTemporal database +that satisfy the user-specified minimum utility, minimum support and maximum distance constraints

+
+
Reference:
+

10.1007/978-3-030-37188-3_17

+
+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine the complete set of Spatial High Utility Frequent patterns

  • +
  • oFile – str : +Name of the output file to store the complete set of Spatial High Utility Frequent patterns

  • +
  • minSup – int or float or str : +The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup is expressed in count. Otherwise, it will be treated as float.

  • +
  • minUtil – int : +The user given minUtil value.

  • +
  • candidateCount – int +Number of candidates

  • +
  • maxMemory – int +Maximum memory used by this program for running

  • +
  • nFile – str : +Name of the neighbourhood file that contains the neighbours of items

  • +
  • sep – str : +This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override their default separator.

  • +
+
+
Attributes:
+
+
iFile : file

Name of the input file to mine complete set of frequent patterns

+
+
nFile : file

Name of the Neighbours file that contain neighbours of items

+
+
oFile : file

Name of the output file to store complete set of frequent patterns

+
+
memoryRSS : float

To store the total amount of RSS memory consumed by the program

+
+
startTime:float

To record the start time of the mining process

+
+
endTime:float

To record the completion time of the mining process

+
+
minUtil : int

The user given minUtil

+
+
minSup : float

The user given minSup value

+
+
highUtilityFrequentSpatialItemSets: map

set of high utility itemSets

+
+
candidateCount: int

Number of candidates

+
+
utilityBinArrayLU: list

A map to hold the PMU values of the items in the database

+
+
utilityBinArraySU: list

A map to hold the subtree utility values of the items in the database

+
+
oldNamesToNewNames: list

A map to store the new name corresponding to each old name

+
+
newNamesToOldNames: list

A map to store the old name corresponding to new name

+
+
Neighbours : map

A dictionary to store the neighbours of an item

+
+
maxMemory: float

Maximum memory used by this program for running

+
+
patternCount: int

Number of SHUFIs (Spatial High Utility Frequent Itemsets)

+
+
itemsToKeep: list

keep only the promising items, i.e., items whose supersets can be required patterns

+
+
itemsToExplore: list

keep items whose subtree utility is greater than minUtil

+
+
+
+
+

Methods:

+
+
+
mine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of frequent patterns will be loaded into an output file

+
+
getPatternsAsDataFrame()

Complete set of frequent patterns will be loaded into a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
calculateNeighbourIntersection(self, prefixLength)

A method to return common Neighbours of items

+
+
backtrackingEFIM(transactionsOfP, itemsToKeep, itemsToExplore, prefixLength)

A method to mine the SHUIs Recursively

+
+
useUtilityBinArraysToCalculateUpperBounds(transactionsPe, j, itemsToKeep, neighbourhoodList)

A method to calculate the sub-tree utility and local utility of all items that can extend itemSet P and e

+
+
output(tempPosition, utility)

A method to save a high-utility itemSet to file or memory depending on what the user chose

+
+
isEqual(transaction1, transaction2)

A method to check if two transactions are identical

+
+
intersection(lst1, lst2)

A method that returns the intersection of two lists; a sketch of the neighbourhood intersection appears after this list

+
+
useUtilityBinArrayToCalculateSubtreeUtilityFirstTime(dataset)

Scan the initial database to calculate the subtree utility of each item using a utility-bin array

+
+
sortDatabase(self, transactions)

A method to sort transactions in the order of PMU

+
+
sortTransaction(self, trans1, trans2)

A method to sort transactions in the order of PMU

+
+
useUtilityBinArrayToCalculateLocalUtilityFirstTime(self, dataset)

A method to scan the database using utility bin array to calculate the pmus

+
+
+
+
+
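The spatial pruning performed by calculateNeighbourIntersection() and intersection() can be pictured with the following sketch (illustrative names, not SHUFIM's implementation): an itemset can only be extended by items lying in the common neighbourhood of its members.

def commonNeighbours(itemSet, neighbours):
    # neighbours: dict mapping an item to the set of its neighbouring items
    common = set(neighbours[itemSet[0]])
    for item in itemSet[1:]:
        common &= set(neighbours[item])
    return common  # an empty set means the itemSet violates the distance constraint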

Executing the code on terminal :

+
Format:
+
+(.venv) $ python3 SHUFIM.py <inputFile> <outputFile> <Neighbours> <minUtil> <minSup> <sep>
+
+Example Usage:
+
+(.venv) $ python3 SHUFIM.py sampleTDB.txt output.txt sampleN.txt 35 20
+
+
+
+

Note

+

minSup will be considered as a percentage of the database transactions

+
+
+
+

Sample run of importing the code:

+
from PAMI.highUtilityGeoreferencedFrequentPattern.basic import SHUFIM as alg
+
+obj=alg.SHUFIM("input.txt","Neighbours.txt",35,20)
+
+obj.mine()
+
+patterns = obj.getPatterns()
+
+print("Total number of Spatial high utility frequent Patterns:", len(patterns))
+
+obj.save("output")
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+

Credits:

+
+

The complete program was written by Pradeep Pallikila under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS()[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS()[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns()[source]
+

Function to send the set of patterns after completion of the mining process

+
+
Returns:
+

returning patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame()[source]
+

Storing final patterns in a dataframe +:return: returning patterns in a dataframe +:rtype: pd.DataFrame

+
+ +
+
+getRuntime()[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine()[source]
+

High Utility Frequent Pattern mining start here

+
+ +
+
+printResults()[source]
+

This function is used to print the results

+
+ +
+
+save(outFile)[source]
+

Complete set of patterns will be loaded into an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
+
+ +
+
+startMine()[source]
+

High Utility Frequent Pattern mining start here

+
+ +
+
+ +
+
+PAMI.highUtilityGeoreferencedFrequentPattern.basic.SHUFIM.main()[source]
+
+ +
+
+

PAMI.highUtilityGeoreferencedFrequentPattern.basic.abstract module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.highUtilityGeoreferencedFrequentPattern.html b/sphinx/_build/html/PAMI.highUtilityGeoreferencedFrequentPattern.html
new file mode 100644
index 000000000..3b5efd379
--- /dev/null
+++ b/sphinx/_build/html/PAMI.highUtilityGeoreferencedFrequentPattern.html
@@ -0,0 +1,195 @@
+PAMI.highUtilityGeoreferencedFrequentPattern package — PAMI 2024.04.23 documentation
+ + +
+ +
+
+
+ +
+
+ + +
+
+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.highUtilityPattern.basic.html b/sphinx/_build/html/PAMI.highUtilityPattern.basic.html
new file mode 100644
index 000000000..5e8f93736
--- /dev/null
+++ b/sphinx/_build/html/PAMI.highUtilityPattern.basic.html
@@ -0,0 +1,824 @@
+PAMI.highUtilityPattern.basic package — PAMI 2024.04.23 documentation
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.highUtilityPattern.basic package

+
+

Submodules

+
+
+

PAMI.highUtilityPattern.basic.EFIM module

+
+
+class PAMI.highUtilityPattern.basic.EFIM.EFIM(iFile, minUtil, sep='\t')[source]
+

Bases: _utilityPatterns

+
+
Description:
+

EFIM is one of the fastest algorithms to mine High Utility ItemSets from transactional databases.

+
+
Reference:
+

Zida, S., Fournier-Viger, P., Lin, J.CW. et al. EFIM: a fast and memory efficient algorithm for +high-utility itemset mining. Knowl Inf Syst 51, 595–625 (2017). https://doi.org/10.1007/s10115-016-0986-0

+
+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine complete set of High Utility patterns

  • +
  • oFile – str : +Name of the output file to store complete set of High Utility patterns

  • +
  • minUtil – int : +The user given minUtil value.

  • +
  • candidateCount – int +Number of candidates specified by user

  • +
  • maxMemory – int +Maximum memory used by this program for running

  • +
  • sep – str : +This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override their default separator.

  • +
+
+
Attributes:
+
+
iFile : file

Name of the input file to mine complete set of high utility patterns

+
+
oFile : file

Name of the output file to store complete set of high utility patterns

+
+
memoryRSS : float

To store the total amount of RSS memory consumed by the program

+
+
startTime:float

To record the start time of the mining process

+
+
endTime:float

To record the completion time of the mining process

+
+
minUtil : int

The user given minUtil value

+
+
highUtilityitemSets: map

set of high utility itemSets

+
+
candidateCount: int

Number of candidates

+
+
utilityBinArrayLU: list

A map to hold the local utility values of the items in database

+
+
utilityBinArraySU: list

A map to hold the subtree utility values of the items in the database

+
+
oldNamesToNewNames: list

A map which contains old names, new names of items as key value pairs

+
+
newNamesToOldNames: list

A map which contains new names, old names of items as key value pairs

+
+
maxMemory: float

Maximum memory used by this program for running

+
+
patternCount: int

Number of HUIs

+
+
itemsToKeep: list

keep only the promising items, i.e., items having local utility values greater than or equal to minUtil

+
+
itemsToExplore: list

list of items that have subtreeUtility value greater than or equal to minUtil

+
+
+
+
+

Methods:

+
+
+
mine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of patterns will be loaded into an output file

+
+
getPatternsAsDataFrame()

Complete set of patterns will be loaded into a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
backTrackingEFIM(transactionsOfP, itemsToKeep, itemsToExplore, prefixLength)

A method to mine the HUIs recursively

+
+
useUtilityBinArraysToCalculateUpperBounds(transactionsPe, j, itemsToKeep)

A method to calculate the sub-tree utility and local utility of all items that can extend itemSet P and e

+
+
output(tempPosition, utility)

A method to output a high-utility itemSet to file or memory depending on what the user chose

+
+
is_equal(transaction1, transaction2)

A method to check if two transactions are identical

+
+
useUtilityBinArrayToCalculateSubtreeUtilityFirstTime(dataset)

A method to calculate the subtree utility values for single items

+
+
sortDatabase(self, transactions)

A method to sort transactions

+
+
sort_transaction(self, trans1, trans2)

A method to sort transactions

+
+
useUtilityBinArrayToCalculateLocalUtilityFirstTime(self, dataset)

A method to calculate local utility values for single itemsets; a sketch of this utility-bin pass appears after this list

+
+
+
+
+
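As a rough sketch of the utility-bin pass behind useUtilityBinArrayToCalculateLocalUtilityFirstTime() (the input layout is an assumption, not EFIM's exact code): a single database scan accumulates each transaction's total utility into the bin of every item it contains.

def localUtilityFirstTime(dataset):
    utilityBinArrayLU = {}
    for transaction in dataset:  # transaction: list of (item, utility) pairs
        transactionUtility = sum(u for _, u in transaction)
        for item, _ in transaction:
            utilityBinArrayLU[item] = utilityBinArrayLU.get(item, 0) + transactionUtility
    return utilityBinArrayLU  # item -> local utility, an upper bound on its real utility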

Executing the code on terminal:

+
Format:
+
+(.venv) $ python3 EFIM.py <inputFile> <outputFile> <minUtil> <sep>
+
+Example Usage:
+
+(.venv) $ python3 EFIM.py sampleTDB.txt output.txt 35
+
+
+
+

Note

+

maxMemory will be considered as the maximum memory used by this program while running

+
+
+
+

Sample run of importing the code:

+
from PAMI.highUtilityPattern.basic import EFIM as alg
+
+obj=alg.EFIM("input.txt",35)
+
+obj.mine()
+
+Patterns = obj.getPatterns()
+
+print("Total number of high utility Patterns:", len(Patterns))
+
+obj.save("output")
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+

Credits:

+
+

The complete program was written by Pradeep Pallikila under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function +:return: returning RSS memory consumed by the mining process +:rtype: float

+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function +:return: returning USS memory consumed by the mining process +:rtype: float

+
+ +
+
+getPatterns() dict[source]
+

Function to send the set of patterns after completion of the mining process +:return: returning patterns +:rtype: dict

+
+ +
+
+getPatternsAsDataFrame() _pd.DataFrame[source]
+

Storing final patterns in a dataframe +:return: returning patterns in a dataframe +:rtype: pd.DataFrame

+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process +:return: returning total amount of runtime taken by the mining process +:rtype: float

+
+ +
+
+mine() None[source]
+

Start the EFIM algorithm. +:return: None

+
+ +
+
+printResults() None[source]
+

This function is used to print the results

+
+ +
+
+save(outFile: str) None[source]
+

Complete set of frequent patterns will be loaded into an output file +:param outFile: name of the output file +:type outFile: csv file +:return: None

+
+ +
+
+sort_transaction(trans1: _Transaction, trans2: _Transaction) int[source]
+

A method to compare two transactions for sorting +:param trans1: the first transaction +:type trans1: Trans +:param trans2: the second transaction +:type trans2: Trans +:return: sorted transaction +:rtype: int

+
+ +
+
+startMine() None[source]
+

Start the EFIM algorithm. +:return: None

+
+ +
+
+ +
+
+

PAMI.highUtilityPattern.basic.HMiner module

+
+
+class PAMI.highUtilityPattern.basic.HMiner.HMiner(iFile1, minUtil, sep='\t')[source]
+

Bases: _utilityPatterns

+
+
Description:
+

High Utility itemSet Mining (HMiner) is an important algorithm to mine high utility itemSets from the database.

+
+
Reference:
+

+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine complete set of High Utility patterns

  • +
  • oFile – str : +Name of the output file to store complete set of High Utility patterns

  • +
  • minUtil – int : +The user given minUtil value.

  • +
  • minSup – int or float or str : +The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup is expressed in count. Otherwise, it will be treated as float.

  • +
  • maxPer – float : +The user can specify maxPer in count or proportion of database size. If the program detects the data type of maxPer is integer, then it treats maxPer is expressed in count.

  • +
  • sep – str : +This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override their default separator.

  • +
+
+
Attributes:
+
+
iFile : file

Name of the input file to mine complete set of frequent patterns

+
+
oFile : file

Name of the output file to store complete set of frequent patterns

+
+
memoryRSS : float

To store the total amount of RSS memory consumed by the program

+
+
startTime:float

To record the start time of the mining process

+
+
endTime:float

To record the completion time of the mining process

+
+
minUtil : int

The user given minUtil

+
+
mapFMAP: list

EUCS map of the FHM algorithm

+
+
candidates: int

candidates generated

+
+
huiCnt: int

HUIs created

+
+
neighbors: map

keep track of neighbours of elements

+
+
+
+
Methods:
+
+
mine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of frequent patterns will be loaded into an output file

+
+
getPatternsAsDataFrame()

Complete set of frequent patterns will be loaded into a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
Explore_SearchTree(prefix, uList, minUtil)

A method to find all high utility itemSets

+
+
UpdateCLosed(x, culs, st, excul, newT, ex, ey_ts, length)

A method to update closed values

+
+
saveitemSet(prefix, prefixLen, item, utility)

A method to save itemSets

+
+
updateElement(z, culs, st, excul, newT, ex, duppos, ey_ts)

A method to update values for duplicates

+
+
construcCUL(x, culs, st, minUtil, length, exnighbors)

A method to construct CUL’s database

+
+
+
+
+
+

Executing the code on terminal:

+
Format:
+
+(.venv) $ python3 HMiner.py <inputFile> <outputFile> <minUtil>
+
+Example Usage:
+
+(.venv) $ python3 HMiner.py sampleTDB.txt output.txt 35
+
+
+
+

Note

+

minSup will be considered as a percentage of the database transactions

+
+
+
+

Sample run of importing the code:

+
from PAMI.highUtilityPattern.basic import HMiner as alg
+
+obj = alg.HMiner("input.txt",35)
+
+obj.mine()
+
+Patterns = obj.getPatterns()
+
+print("Total number of high utility Patterns:", len(Patterns))
+
+obj.save("output")
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+

Credits:

+
+

The complete program was written by B.Sai Chitra under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS()[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function +:return: returning RSS memory consumed by the mining process +:rtype: float

+
+ +
+
+getMemoryUSS()[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function +:return: returning USS memory consumed by the mining process +:rtype: float

+
+ +
+
+getPatterns()[source]
+

Function to send the set of frequent patterns after completion of the mining process +:return: returning frequent patterns +:rtype: dict

+
+ +
+
+getPatternsAsDataFrame()[source]
+

Storing final frequent patterns in a dataframe +:return: returning frequent patterns in a dataframe +:rtype: pd.DataFrame

+
+ +
+
+getRuntime()[source]
+

Calculating the total amount of runtime taken by the mining process +:return: returning total amount of runtime taken by the mining process +:rtype: float

+
+ +
+
+mine()[source]
+

Main program to start the operation

+
+ +
+
+printResults()[source]
+

This function is used to print the results

+
+ +
+
+save(outFile)[source]
+

Complete set of frequent patterns will be loaded into an output file +:param outFile: name of the output file +:type outFile: csv file

+
+ +
+
+startMine()[source]
+

Main program to start the operation

+
+ +
+
+ +
+
+

PAMI.highUtilityPattern.basic.UPGrowth module

+
+
+class PAMI.highUtilityPattern.basic.UPGrowth.UPGrowth(iFile: str, minUtil: int, sep: str = '\t')[source]
+

Bases: _utilityPatterns

+
+
Description:
+

UP-Growth is a two-phase algorithm to mine High Utility Itemsets from transactional databases (a sketch of the second phase appears below).

+
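A hypothetical sketch of that second phase (illustrative names, not UP-Growth's code): the PHUIs found with overestimated utilities in phase one are rechecked against exact utilities computed by one more database scan.

def filterPHUIs(phuis, exactUtility, minUtil):
    # exactUtility: dict mapping a candidate itemset to its exact utility
    return {p: exactUtility[p] for p in phuis if exactUtility[p] >= minUtil}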
+
Reference:
+

Vincent S. Tseng, Cheng-Wei Wu, Bai-En Shie, and Philip S. Yu. 2010. UP-Growth: an efficient algorithm for high utility itemset mining. +In Proceedings of the 16th ACM SIGKDD international conference on Knowledge discovery and data mining (KDD ‘10). +Association for Computing Machinery, New York, NY, USA, 253–262. DOI:https://doi.org/10.1145/1835804.1835839

+
+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine complete set of High Utility patterns

  • +
  • oFile – str : +Name of the output file to store complete set of High Utility patterns

  • +
  • minUtil – int : +The user given minUtil value.

  • +
  • candidateCount – int +Number of candidates specified by user

  • +
  • maxMemory – int +Maximum memory used by this program for running

  • +
  • sep – str : +This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override their default separator.

  • +
+
+
Attributes:
+
+
iFile : file

Name of the input file to mine complete set of frequent patterns

+
+
oFile : file

Name of the output file to store complete set of frequent patterns

+
+
memoryRSS : float

To store the total amount of RSS memory consumed by the program

+
+
startTime:float

To record the start time of the mining process

+
+
endTime:float

To record the completion time of the mining process

+
+
minUtil : int

The user given minUtil

+
+
NumberOfNodes : int

Total number of nodes generated while building the tree

+
+
ParentNumberOfNodes : int

Total number of nodes required to build the parent tree

+
+
MapItemToMinimumUtility : map

A map to store the minimum utility of each item in the database

+
+
phuis : list

A list to store the PHUIs (potential high utility itemsets)

+
+
MapItemToTwu : map

A map to store the TWU of each item in the database

+
+
+
+
Methods:
+
+
mine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
createLocalTree(tree, item)

A method to construct the conditional pattern base

+
+
UPGrowth( tree, alpha)

A method to mine the UP-Tree recursively

+
+
PrintStats()

A method to print the number of PHUIs

+
+
save(oFile)

Complete set of frequent patterns will be loaded into an output file

+
+
getPatternsAsDataFrame()

Complete set of frequent patterns will be loaded into a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
+
+
+
+

Executing the code on terminal:

+
Format:
+
+(.venv) $ python3 UPGrowth.py <inputFile> <outputFile> <minUtil> <sep>
+
+Example Usage:
+
+(.venv) $ python3 UPGrowth.py sampleTDB.txt output.txt 35
+
+
+
+

Note

+

maxMemory will be considered as the maximum memory used by this program while running

+
+
+
+

Sample run of importing the code:

+
from PAMI.highUtilityPattern.basic import UPGrowth as alg
+
+obj=alg.UPGrowth("input.txt",35)
+
+obj.mine()
+
+highUtilityPattern = obj.getPatterns()
+
+print("Total number of Spatial Frequent Patterns:", len(highUtilityPattern))
+
+obj.save("output")
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+

Credits:

+
+

The complete program was written by Pradeep Pallikila under the supervision of Professor Rage Uday Kiran.

+
+
+
+PrintStats() None[source]
+

A method to print the number of PHUIs +:return: None

+
+ +
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function +:return: returning RSS memory consumed by the mining process +:rtype: float

+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function +:return: returning USS memory consumed by the mining process +:rtype: float

+
+ +
+
+getPatterns() dict[source]
+

Function to send the set of frequent patterns after completion of the mining process +:return: returning frequent patterns +:rtype: dict

+
+ +
+
+getPatternsAsDataFrame() DataFrame[source]
+

Storing final frequent patterns in a dataframe +:return: returning frequent patterns in a dataframe +:rtype: pd.DataFrame

+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process +:return: returning total amount of runtime taken by the mining process +:rtype: float

+
+ +
+
+mine() None[source]
+

Mining process will start from here +:return: None

+
+ +
+
+printResults() None[source]
+

This function is used to print the results +:return: None

+
+ +
+
+save(outFile: str) None[source]
+

Complete set of frequent patterns will be loaded into an output file +:param outFile: name of the output file +:type outFile: csv file +:return: None

+
+ +
+
+startMine() None[source]
+

Mining process will start from here +:return: None

+
+ +
+
+ +
+
+

PAMI.highUtilityPattern.basic.abstract module

+
+
+

PAMI.highUtilityPattern.basic.efimParallel module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.highUtilityPattern.html b/sphinx/_build/html/PAMI.highUtilityPattern.html
new file mode 100644
index 000000000..4859e74cb
--- /dev/null
+++ b/sphinx/_build/html/PAMI.highUtilityPattern.html
@@ -0,0 +1,234 @@
+PAMI.highUtilityPattern package — PAMI 2024.04.23 documentation
+ + +
+ +
+
+
+ +
+
+ + +
+
+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.highUtilityPattern.parallel.html b/sphinx/_build/html/PAMI.highUtilityPattern.parallel.html
new file mode 100644
index 000000000..ca1708b23
--- /dev/null
+++ b/sphinx/_build/html/PAMI.highUtilityPattern.parallel.html
@@ -0,0 +1,176 @@
+PAMI.highUtilityPattern.parallel package — PAMI 2024.04.23 documentation
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.highUtilityPattern.parallel package

+
+

Submodules

+
+
+

PAMI.highUtilityPattern.parallel.abstract module

+
+
+

PAMI.highUtilityPattern.parallel.efimparallel module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.highUtilityPatternsInStreams.html b/sphinx/_build/html/PAMI.highUtilityPatternsInStreams.html
new file mode 100644
index 000000000..a8f5d8eec
--- /dev/null
+++ b/sphinx/_build/html/PAMI.highUtilityPatternsInStreams.html
@@ -0,0 +1,178 @@
+PAMI.highUtilityPatternsInStreams package — PAMI 2024.04.23 documentation
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.highUtilityPatternsInStreams package

+
+

Submodules

+
+
+

PAMI.highUtilityPatternsInStreams.HUPMS module

+
+
+

PAMI.highUtilityPatternsInStreams.SHUGrowth module

+
+
+

PAMI.highUtilityPatternsInStreams.abstract module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.highUtilitySpatialPattern.basic.html b/sphinx/_build/html/PAMI.highUtilitySpatialPattern.basic.html
new file mode 100644
index 000000000..8c821f4d3
--- /dev/null
+++ b/sphinx/_build/html/PAMI.highUtilitySpatialPattern.basic.html
@@ -0,0 +1,695 @@
+PAMI.highUtilitySpatialPattern.basic package — PAMI 2024.04.23 documentation
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.highUtilitySpatialPattern.basic package

+
+

Submodules

+
+
+

PAMI.highUtilitySpatialPattern.basic.HDSHUIM module

+
+
+class PAMI.highUtilitySpatialPattern.basic.HDSHUIM.HDSHUIM(iFile: str, nFile: str, minUtil: int, sep: str = '\t')[source]
+

Bases: _utilityPatterns

+
+
Description:
+

Spatial High Utility ItemSet Mining (SHUIM) [3] is an important model in data +mining with many real-world applications. It involves finding all spatially interesting itemSets having high value +in a quantitative spatiotemporal database.

+
+
Reference:
+

P. Pallikila et al., “Discovering Top-k Spatial High Utility Itemsets in Very Large Quantitative Spatiotemporal +databases,” 2021 IEEE International Conference on Big Data (Big Data), Orlando, FL, USA, 2021, pp. 4925-4935, +doi: 10.1109/BigData52589.2021.9671912.

+
+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine complete set of High Utility Spatial patterns

  • +
  • oFile – str : +Name of the output file to store complete set of High Utility Spatial patterns

  • +
  • minSup – int or float or str : +The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup is expressed in count. Otherwise, it will be treated as float.

  • +
  • maxPer – float : +The user can specify maxPer in count or proportion of database size. If the program detects the data type of maxPer is integer, then it treats maxPer is expressed in count.

  • +
  • minUtil – int : +Minimum utility threshold given by User

  • +
  • nFile – str : +Name of the input file to mine complete set of High Utility Spatial patterns

  • +
  • sep – str : +This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override their default separator.

  • +
+
+
Attributes:
+
+
iFile : str

Name of the input file to mine complete set of frequent patterns

+
+
oFile : str

Name of the output file to store complete set of frequent patterns

+
+
nFile: str

Name of Neighbourhood items file

+
+
memoryRSS : float

To store the total amount of RSS memory consumed by the program

+
+
startTime:float

To record the start time of the mining process

+
+
endTime:float

To record the completion time of the mining process

+
+
minUtil : int

The user given minUtil

+
+
mapFMAP: list

EUCS map of the FHM algorithm

+
+
candidates: int

candidates generated

+
+
huiCnt: int

huis created

+
+
neighbors: map

keep track of neighbours of elements

+
+
mapOfPMU: map

a map to keep track of Probable Maximum utility(PMU) of each item

+
+
+
+
Methods:
+
+
mine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of frequent patterns will be loaded into an output file

+
+
constructCUL(x, compactUList, st, minUtil, length, exNeighbours)

A method to construct CUL’s database

+
+
getPatternsAsDataFrame()

Complete set of frequent patterns will be loaded into a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
Explore_SearchTree(prefix, uList, exNeighbours, minUtil)

A method to find all high utility itemSets

+
+
updateClosed(x, compactUList, st, exCul, newT, ex, eyTs, length)

A method to update closed values

+
+
saveItemSet(prefix, prefixLen, item, utility)

A method to save itemSets

+
+
updateElement(z, compactUList, st, exCul, newT, ex, duPrevPos, eyTs)

A method to update values for duplicates

+
+
+
+
+
+

Executing the code on terminal:

+
Format:
+
+(.venv) $ python3 HDSHUIM.py <inputFile> <outputFile> <Neighbours> <minUtil> <separator>
+
+Example Usage:
+
+(.venv) $ python3 HDSHUIM.py sampleTDB.txt output.txt sampleN.txt 35 ','
+
+
+
+

Note

+

minSup will be considered as a percentage of the database transactions

+
+
+
+

Sample run of importing the code:

+
from PAMI.highUtilitySpatialPattern.basic import HDSHUIM as alg
+
+obj=alg.HDSHUIM("input.txt","Neighbours.txt",35)
+
+obj.mine()
+
+Patterns = obj.getPatterns()
+
+print("Total number of Spatial High-Utility Patterns:", len(Patterns))
+
+obj.save("output")
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+

Credits:

+
+

The complete program was written by B.Sai Chitra under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns() Dict[str, str][source]
+

Function to send the set of frequent patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame() Dict[str, str][source]
+

Storing final frequent patterns in a dataframe

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine() None[source]
+

main program to start the operation

+
+ +
+
+printResults() None[source]
+

This function is used to print the results

+
+ +
+
+save(outFile: str) None[source]
+

Complete set of frequent patterns will be loaded into an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
Returns:
+

None

+
+
+
+ +
+
+startMine() None[source]
+

main program to start the operation

+
+ +
+
+ +
+
+

PAMI.highUtilitySpatialPattern.basic.SHUIM module

+
+
+class PAMI.highUtilitySpatialPattern.basic.SHUIM.SHUIM(iFile: str, nFile: str, minUtil: int, sep: str = '\t')[source]
+

Bases: _utilityPatterns

+
+
Description:
+

Spatial High Utility itemSet Mining (SHUIM) aims to discover all itemSets in a spatioTemporal database +that satisfy the user-specified minimum utility and maximum distance constraints

+
+
Reference:
+

Rage, Uday & Veena, Pamalla & Penugonda, Ravikumar & Raj, Bathala & Dao, Minh & Zettsu, Koji & Bommisetti, Sai. +(2023). HDSHUI-miner: a novel algorithm for discovering spatial high-utility itemsets in high-dimensional +spatiotemporal databases. Applied Intelligence. 53. 1-26. 10.1007/s10489-022-04436-w.

+
+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine complete set of High Utility Spatial patterns

  • +
  • oFile – str : +Name of the output file to store complete set of High Utility Spatial patterns

  • +
  • minSup – int or float or str : +The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup is expressed in count. Otherwise, it will be treated as float.

  • +
  • maxPer – float : +The user can specify maxPer in count or proportion of database size. If the program detects the data type of maxPer is integer, then it treats maxPer is expressed in count.

  • +
  • minUtil – int : +Minimum utility threshold given by User

  • +
  • maxMemory – int : +Maximum memory used by this program for running

  • +
  • candidateCount – int : +Number of candidates to consider when calculating a high utility spatial pattern

  • +
  • nFile – str : +Name of the input file to mine complete set of High Utility Spatial patterns

  • +
  • sep – str : +This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override their default separator.

  • +
+
+
Attributes:
+
+
iFile : file

Name of the input file to mine complete set of frequent patterns

+
+
nFile : file

Name of the Neighbours file that contain neighbours of items

+
+
oFile : file

Name of the output file to store complete set of frequent patterns

+
+
memoryRSS : float

To store the total amount of RSS memory consumed by the program

+
+
startTime:float

To record the start time of the mining process

+
+
endTime:float

To record the completion time of the mining process

+
+
minUtil : int

The user given minUtil

+
+
highUtilityItemSets: map

set of high utility itemSets

+
+
candidateCount: int

Number of candidates

+
+
utilityBinArrayLU: list

A map to hold the PMU values of the items in the database

+
+
utilityBinArraySU: list

A map to hold the subtree utility values of the items in the database

+
+
oldNamesToNewNames: list

A map to store the new name corresponding to each old name

+
+
newNamesToOldNames: list

A map to store the old name corresponding to new name

+
+
Neighbours : map

A dictionary to store the neighbours of an item

+
+
+

maxMemory: float

Maximum memory used by this program for running

+
+
patternCount: int

Number of SHUIs (Spatial High Utility Itemsets)

+
+
+
itemsToKeep: list

keep only the promising items, i.e., items having TWU >= minUtil

+
+
itemsToExplore: list

keep items whose subtree utility is greater than minUtil

+
+
+
+
Methods:
+
+
mine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of frequent patterns will be loaded into an output file

+
+
getPatternsAsDataFrame()

Complete set of frequent patterns will be loaded into a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
calculateNeighbourIntersection(self, prefixLength)

A method to return common Neighbours of items

+
+
backtrackingEFIM(transactionsOfP, itemsToKeep, itemsToExplore, prefixLength)

A method to mine the SHUIs Recursively

+
+
useUtilityBinArraysToCalculateUpperBounds(transactionsPe, j, itemsToKeep, neighbourhoodList)

A method to calculate the sub-tree utility and local utility of all items that can extend itemSet P and e

+
+
output(tempPosition, utility)

A method to save a high-utility itemSet to file or memory depending on what the user chose

+
+
_isEqual(transaction1, transaction2)

A method to check if two transactions are identical

+
+
intersection(lst1, lst2)

A method that returns the intersection of two lists

+
+
useUtilityBinArrayToCalculateSubtreeUtilityFirstTime(dataset)

Scan the initial database to calculate the subtree utility of each item using a utility-bin array

+
+
sortDatabase(self, transactions)

A method to sort transactions in the order of PMU

+
+
sort_transaction(self, trans1, trans2)

A method to sort transactions in the order of PMU

+
+
useUtilityBinArrayToCalculateLocalUtilityFirstTime(self, dataset)

A method to scan the database using utility bin array to calculate the pmus

+
+
+
+
+
+

Executing the code on terminal:

+
Format:
+
+(.venv) $ python3 SHUIM.py <inputFile> <outputFile> <Neighbours> <minUtil> <sep>
+
+Example Usage:
+
+(.venv) $ python3 SHUIM.py sampleTDB.txt output.txt sampleN.txt 35
+
+
+
+

Note

+

minSup will be considered as a percentage of the database transactions

+
+
+
+

Sample run of importing the code:

+
from PAMI.highUtilitySpatialPattern.basic import SHUIM as alg
+
+obj=alg.SHUIM("input.txt","Neighbours.txt",35)
+
+obj.mine()
+
+frequentPatterns = obj.getPatterns()
+
+print("Total number of Spatial high utility Patterns:", len(frequentPatterns))
+
+obj.save("output")
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+

Credits:

+
+

The complete program was written by Pradeep Pallikila under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns() Dict[str, str][source]
+

Function to send the set of patterns after completion of the mining process

+
+
Returns:
+

returning patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame() DataFrame[source]
+

Storing final patterns in a dataframe

+
+
Returns:
+

returning patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine() None[source]
+

main program to start the operation

+
+ +
+
+printResults() None[source]
+

This function is used to print the results

+
+ +
+
+save(outFile: str) None[source]
+

Complete set of patterns will be loaded into an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
Returns:
+

None

+
+
+
+ +
+
+startMine() None[source]
+

main program to start the operation

+
+ +
+
+ +
+
+

PAMI.highUtilitySpatialPattern.basic.abstract module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.highUtilitySpatialPattern.html b/sphinx/_build/html/PAMI.highUtilitySpatialPattern.html
new file mode 100644
index 000000000..d8a5dc550
--- /dev/null
+++ b/sphinx/_build/html/PAMI.highUtilitySpatialPattern.html
@@ -0,0 +1,464 @@
+PAMI.highUtilitySpatialPattern package — PAMI 2024.04.23 documentation
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.highUtilitySpatialPattern package

+
+

Subpackages

+
+ +
+
+
+

Submodules

+
+
+

PAMI.highUtilitySpatialPattern.abstract module

+
+
+class PAMI.highUtilitySpatialPattern.abstract.utilityPatterns(iFile, nFile, minUtil, sep='\t')[source]
+

Bases: ABC

+
+
Description:
+

This abstract base class defines the variables and methods that every frequent pattern mining algorithm must +employ in PAMI

+
+
Attributes:
+
+
iFile : str

Input file name or path of the input file

+
+
minUtil: integer

The user can specify minUtil in count

+
+
sep : str

This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. +However, the users can override their default separator

+
+
startTime:float

To record the start time of the algorithm

+
+
endTime:float

To record the completion time of the algorithm

+
+
finalPatterns: dict

Storing the complete set of patterns in a dictionary variable

+
+
oFile : str

Name of the output file to store complete set of frequent patterns

+
+
memoryUSS : float

To store the total amount of USS memory consumed by the program

+
+
memoryRSS : float

To store the total amount of RSS memory consumed by the program

+
+
+
+
Methods:
+
+
startMine()

Calling this function will start the actual mining process

+
+
getPatterns()

This function will output all interesting patterns discovered by an algorithm

+
+
save(oFile)

This function will store the discovered patterns in an output file specified by the user

+
+
getPatternsAsDataFrame()

The function outputs the patterns generated by an algorithm as a data frame

+
+
getMemoryUSS()

This function outputs the total amount of USS memory consumed by a mining algorithm

+
+
getMemoryRSS()

This function outputs the total amount of RSS memory consumed by a mining algorithm

+
+
getRuntime()

This function outputs the total runtime of a mining algorithm

+
+
+
+
+
+
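For orientation, a hypothetical minimal subclass (not part of PAMI) showing how a concrete miner would plug into this abstract base class; every abstract member listed below must be overridden before the class can be instantiated.

from PAMI.highUtilitySpatialPattern.abstract import utilityPatterns

class ToyMiner(utilityPatterns):
    # behavioural members
    def startMine(self): ...              # actual mining logic goes here
    def getPatterns(self): ...            # return the discovered patterns
    def save(self, oFile): ...            # write patterns to oFile
    def getPatternsAsDataFrame(self): ...
    def getMemoryUSS(self): ...
    def getMemoryRSS(self): ...
    def getRuntime(self): ...
    # variable-style abstract members also declared by the ABC
    def iFile(self): ...
    def nFile(self): ...
    def oFile(self): ...
    def minUtil(self): ...
    def startTime(self): ...
    def endTime(self): ...
    def finalPatterns(self): ...
    def memoryUSS(self): ...
    def memoryRSS(self): ...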
+abstract endTime()[source]
+

Variable to store the end time of the complete program

+
+ +
+
+abstract finalPatterns()[source]
+

Variable to store the complete set of patterns in a dictionary

+
+ +
+
+abstract getMemoryRSS()[source]
+

Total amount of RSS memory consumed by the program will be retrieved from this function

+
+ +
+
+abstract getMemoryUSS()[source]
+

Total amount of USS memory consumed by the program will be retrieved from this function

+
+ +
+
+abstract getPatterns()[source]
+

Complete set of frequent patterns generated will be retrieved from this function

+
+ +
+
+abstract getPatternsAsDataFrame()[source]
+

Complete set of frequent patterns will be loaded into a data frame from this function

+
+ +
+
+abstract getRuntime()[source]
+

Total amount of runtime taken by the program will be retrieved from this function

+
+ +
+
+abstract iFile()[source]
+

Variable to store the input file path/file name

+
+ +
+
+abstract memoryRSS()[source]
+

Variable to store RSS memory consumed by the program

+
+ +
+
+abstract memoryUSS()[source]
+

Variable to store USS memory consumed by the program

+
+ +
+
+abstract minUtil()[source]
+

Variable to store the user-specified minimum utility value

+
+ +
+
+abstract nFile()[source]
+

Variable to store the neighbourhood file path/file name

+
+ +
+
+abstract oFile()[source]
+

Variable to store the name of the output file to store the complete set of frequent patterns

+
+ +
+
+abstract save(oFile)[source]
+

Complete set of frequent patterns will be saved in to an output file from this function

+
+
Parameters:
+

oFile (csv file) – Name of the output file

+
+
+
+ +
+
+abstract startMine()[source]
+

Code for the mining process will start from this function

+
+ +
+
+abstract startTime()[source]
+

Variable to store the start time of the mining process

+
+ +
+ +
+
+
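For orientation, here is a minimal, hypothetical sketch of how a concrete miner could satisfy this abstract contract. MinerSkeleton and ToyMiner are illustrative stand-ins, not PAMI classes, and the real abstract class declares several more abstract members than this sketch does.

+from abc import ABC, abstractmethod
+import time
+
+class MinerSkeleton(ABC):
+    """Simplified stand-in for the utilityPatterns contract above."""
+    def __init__(self, iFile, nFile, minUtil, sep='\t'):
+        self.iFile, self.nFile, self.minUtil, self.sep = iFile, nFile, minUtil, sep
+        self.finalPatterns = {}
+
+    @abstractmethod
+    def startMine(self):
+        """Concrete miners implement the actual mining process here."""
+
+    def getPatterns(self):
+        return self.finalPatterns
+
+class ToyMiner(MinerSkeleton):
+    def startMine(self):
+        start = time.time()
+        self.finalPatterns = {('a', 'b'): 120}   # placeholder, not a real result
+        self.runtime = time.time() - start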

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.highUtilitySpatialPattern.topk.html b/sphinx/_build/html/PAMI.highUtilitySpatialPattern.topk.html new file mode 100644 index 000000000..1c9a43ba5 --- /dev/null +++ b/sphinx/_build/html/PAMI.highUtilitySpatialPattern.topk.html @@ -0,0 +1,1024 @@ + + + + + + + PAMI.highUtilitySpatialPattern.topk package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.highUtilitySpatialPattern.topk package

+
+

Submodules

+
+
+

PAMI.highUtilitySpatialPattern.topk.TKSHUIM module

+
+
+class PAMI.highUtilitySpatialPattern.topk.TKSHUIM.Dataset(datasetpath, sep)[source]
+

Bases: object

+

A class representing the list of transactions in this dataset

+
+
Attributes:
+

+
+
+
transactions:

the list of transactions in this dataset

+
+
maxItem:

the largest item name

+
+
+
+
Methods:
+
+
createTransaction(line):

Create a transaction object from a line from the input file

+
+
getMaxItem():

return Maximum Item

+
+
getTransactions():

return transactions in database

+
+
+
+
+
+
+createTransaction(line)[source]
+

A method to create a Transaction from a given line of the dataset

+
+
Parameters:
+

line (string) – represent a single line of database

+
+
+

:return: Transaction. +:rtype: Transaction

+
+ +
+
+getMaxItem()[source]
+

A method to return the name of the largest item

+
+ +
+
+getTransactions()[source]
+

A method to return transactions from database

+
+ +
+
+maxItem = 0
+
+ +
+
+transactions = []
+
+ +
+ +
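To make createTransaction concrete, the sketch below parses one line under the assumption that the input follows the usual PAMI utility-database layout, items:transactionUtility:utilities; parse_transaction is a hypothetical helper, not the class's actual code.

+def parse_transaction(line, sep='\t'):
+    # assumed layout: "item1<sep>item2...:transactionUtility:u1<sep>u2..."
+    items_part, tu_part, utils_part = line.strip().split(':')
+    items = items_part.split(sep)
+    utilities = [int(u) for u in utils_part.split(sep)]
+    return items, int(tu_part), utilities
+
+items, tu, utils = parse_transaction('a\tb\tc:10:4\t3\t3')
+print(items, tu, utils)   # ['a', 'b', 'c'] 10 [4, 3, 3]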
+
+class PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM(iFile, nFile, k, sep='\t')[source]
+

Bases: utilityPatterns

+
+
Description:
+

Top K Spatial High Utility ItemSet Mining (TKSHUIM) aims to discover Top-K Spatial High Utility Itemsets +(TKSHUIs) in a spatiotemporal database

+
+
Reference:
+

P. Pallikila et al., “Discovering Top-k Spatial High Utility Itemsets in Very Large Quantitative Spatiotemporal +databases,” 2021 IEEE International Conference on Big Data (Big Data), Orlando, FL, USA, 2021, pp. 4925-4935, +doi: 10.1109/BigData52589.2021.9671912.

+
+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine complete set of High Utility Spatial patterns

  • +
  • oFile – str : +Name of the output file to store complete set of High Utility Spatial patterns

  • +
  • minUtil – int : +Minimum utility threshold given by User

  • +
  • maxMemory – int : +Maximum memory used by this program for running

  • +
  • candidateCount – int : +Number of candidates to consider when calculating a high utility spatial pattern

  • +
  • nFile – str : +Name of the input file to mine complete set of High Utility Spatial patterns

  • +
• sep – str : +This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override their default separator.

  • +
+
+
Attributes:
+
+
iFilefile

Name of the input file to mine complete set of frequent patterns

+
+
nFilefile

Name of the Neighbours file that contain neighbours of items

+
+
oFilefile

Name of the output file to store complete set of frequent patterns

+
+
memoryRSSfloat

To store the total amount of RSS memory consumed by the program

+
+
startTime:float

To record the start time of the mining process

+
+
endTime:float

To record the completion time of the mining process

+
+
kint

The user given k value

+
+
candidateCount: int

Number of candidates

+
+
utilityBinArrayLU: list

A map to hold the pmu values of the items in database

+
+
utilityBinArraySU: list

A map to hold the subtree utility values of the items in the database

+
+
oldNamesToNewNames: list

A map to store the new name corresponding to each old name

+
+
newNamesToOldNames: list

A map to store the old name corresponding to new name

+
+
Neighboursmap

A dictionary to store the neighbours of an item

+
+
maxMemory: float

Maximum memory used by this program for running

+
+
itemsToKeep: list

keep only the promising items, i.e., items having TWU >= minUtil

+
+
itemsToExplore: list

keep items whose subtree utility is greater than minUtil

+
+
+
+
Methods:
+
+
mine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of patterns will be loaded in to an output file

+
+
getPatternsAsDataFrame()

Complete set of patterns will be loaded in to a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
calculateNeighbourIntersection(self, prefixLength)

A method to return common Neighbours of items

+
+
backtrackingEFIM(transactionsOfP, itemsToKeep, itemsToExplore, prefixLength)

A method to mine the TKSHUIs Recursively

+
+
useUtilityBinArraysToCalculateUpperBounds(transactionsPe, j, itemsToKeep, neighbourhoodList)

A method to calculate the sub-tree utility and local utility of all items that can extend itemSet P U {e}

+
+
output(tempPosition, utility)

A method to save a high-utility itemSet to file or memory depending on what the user chose

+
+
is_equal(transaction1, transaction2)

A method to check whether two transactions are identical

+
+
intersection(lst1, lst2)

A method that returns the intersection of two lists

+
+
useUtilityBinArrayToCalculateSubtreeUtilityFirstTime(dataset)

Scan the initial database to calculate the subtree utility of each item using a utility-bin array

+
+
sortDatabase(self, transactions)

A method to sort transactions in the order of PMU

+
+
sort_transaction(self, trans1, trans2)

A method to sort transactions in the order of PMU

+
+
useUtilityBinArrayToCalculateLocalUtilityFirstTime(self, dataset)

A method to scan the database using utility bin array to calculate the pmus

+
+
+
+
+
+

Executing the code on terminal:

+
Format:
+
+(.venv) $ python3 TKSHUIM.py <inputFile> <outputFile> <Neighbours> <k> <sep>
+
+Example Usage:
+
+(.venv) $ python3 TKSHUIM.py sampleTDB.txt output.txt sampleN.txt 35
+
+
+
+

Note

+

maxMemory will be considered as Maximum memory used by this program for running

+
+
+
+

Sample run of importing the code:

+
from PAMI.highUtilitySpatialPattern.topk import TKSHUIM as alg
+
+obj = alg.TKSHUIM("input.txt", "Neighbours.txt", 35)
+
+obj.mine()
+
+Patterns = obj.getPatterns()
+
+obj.save("output")
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+

Credits:

+
+

The complete program was written by Pradeep Pallikila under the supervision of Professor Rage Uday Kiran.

+
+
+
+Neighbours = {}
+
+ +
+
+additemset(itemset, utility)[source]
+

adds the itemset to the priority queue

+
+
Parameters:
+
    +
  • itemset (str) – the itemset to be added

  • +
  • utility (numpy.array) – utility matrix for the itemset to be added

  • +
+
+
+
+ +
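additemset maintains the current top-k candidates in a priority queue (see the heapList attribute below). The following is a minimal sketch of that idea using Python's heapq; add_itemset is a hypothetical helper, and the tuple layout is an assumption rather than PAMI's exact bookkeeping.

+import heapq
+
+def add_itemset(heap, k, itemset, utility):
+    # keep the k highest-utility itemsets; the min-heap root is the weakest
+    if len(heap) < k:
+        heapq.heappush(heap, (utility, itemset))
+    elif utility > heap[0][0]:
+        heapq.heapreplace(heap, (utility, itemset))
+
+heap = []
+for itemset, utility in [('a b', 40), ('b c', 75), ('a c', 60)]:
+    add_itemset(heap, 2, itemset, utility)
+print(sorted(heap, reverse=True))   # [(75, 'b c'), (60, 'a c')]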
+
+backtrackingEFIM(transactionsOfP, itemsToKeep, itemsToExplore, prefixLength)[source]
+

A method to mine the TKSHUIs Recursively

+
+
Parameters:
+
    +
  • transactionsOfP (list) – the list of transactions containing the current prefix P

  • +
  • itemsToKeep (list) – the list of secondary items in the p-projected database

  • +
  • itemsToExplore (list) – the list of primary items in the p-projected database

  • +
  • prefixLength (int) – current prefixLength

  • +
+
+
+
+ +
+
+calculateNeighbourIntersection(prefixLength)[source]
+

A method to find common Neighbours

+
+
Parameters:
+

prefixLength – the prefix itemSet

+
+
+

:type prefixLength: int

+
+ +
+
+candidateCount = 0
+
+ +
+
+endTime = 0.0
+
+ +
+
+finalPatterns = {}
+
+ +
+
+getMemoryRSS()[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS()[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns()[source]
+

Function to send the set of patterns after completion of the mining process

+
+
Returns:
+

returning patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame()[source]
+

Storing final patterns in a dataframe

+
+
Returns:
+

returning patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime()[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+heapList = []
+
+ +
+
+iFile = ' '
+
+ +
+
+intTostr = {}
+
+ +
+
+intersection(lst1, lst2)[source]
+

A method that returns the intersection of two lists

+
+
Parameters:
+
    +
  • lst1 (list) – items neighbour to item1

  • +
  • lst2 (list) – items neighbour to item2

  • +
+
+
+

:return: intersection of two lists +:rtype: list

+
+ +
+
+is_equal(transaction1, transaction2)[source]
+

A method to check whether two transactions are identical

+
+
Parameters:
+
    +
  • transaction1 (Transaction) – the first transaction.

  • +
  • transaction2 (Transaction) – the second transaction.

  • +
+
+
+

:return: whether both are identical or not +:rtype: bool

+
+ +
+
+maxMemory = 0
+
+ +
+
+memoryRSS = 0.0
+
+ +
+
+memoryUSS = 0.0
+
+ +
+
+minUtil = 0
+
+ +
+
+mine()[source]
+

Main function of the program.

+
+ +
+
+nFile = ' '
+
+ +
+
+newNamesToOldNames = {}
+
+ +
+
+oFile = ' '
+
+ +
+
+oldNamesToNewNames = {}
+
+ +
+
+output(tempPosition, utility)[source]
+

A method to save all high-utility itemSets to file or memory depending on what the user chose

+
+
Parameters:
+

tempPosition – position of last item

+
+
+

:type tempPosition: int +:param utility: total utility of itemSet +:type utility: int

+
+ +
+
+printResults()[source]
+

This function is used to print the results

+
+ +
+
+save(outFile)[source]
+

Complete set of patterns will be loaded in to an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
+
+ +
+
+sep = '\t'
+
+ +
+
+sortDatabase(transactions)[source]
+

A method to sort transactions in the order of PMU

+
+
Parameters:
+

transactions (Transaction) – transaction of items

+
+
Returns:
+

sorted transaction

+
+
Return type:
+

Transaction

+
+
+
+ +
+
+sort_transaction(trans1, trans2)[source]
+

A method to sort transactions in the order of PMU

+
+
Parameters:
+

trans1 (Transaction) – the first transaction.

+
+
+

:param trans2: the second transaction. +:type trans2: Transaction +:return: comparison result used for sorting. +:rtype: int

+
+ +
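Since sort_transaction is documented as a comparator returning an int, the sketch below shows how such a PMU-order comparator can drive sorting; the pmu table and by_pmu function are hypothetical illustrations, not the class's internal state.

+from functools import cmp_to_key
+
+pmu = {'a': 5, 'b': 9, 'c': 2}          # assumed item -> PMU table
+
+def by_pmu(trans1, trans2):
+    # compare transactions item by item in ascending PMU order
+    for x, y in zip(sorted(trans1, key=pmu.get), sorted(trans2, key=pmu.get)):
+        if pmu[x] != pmu[y]:
+            return pmu[x] - pmu[y]
+    return len(trans1) - len(trans2)
+
+db = [['a', 'b'], ['c'], ['b', 'c', 'a']]
+print(sorted(db, key=cmp_to_key(by_pmu)))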
+
+startMine()[source]
+

Main function of the program.

+
+ +
+
+startTime = 0.0
+
+ +
+
+strToint = {}
+
+ +
+
+temp = [0, 0, 0, ...] (a long list initialised to zeros; elided here for readability)
+
+ +
+
+useUtilityBinArrayToCalculateLocalUtilityFirstTime(dataset)[source]
+

A method to scan the database using utility bin array to calculate the pmus

+
+
Parameters:
+

dataset (database) – the transaction database.

+
+
+
+ +
+
+useUtilityBinArrayToCalculateSubtreeUtilityFirstTime(dataset)[source]
+

Scan the initial database to calculate the subtree utility of each item using a utility-bin array

+
+
Parameters:
+

dataset (Dataset) – the transaction database

+
+
+
+ +
+
+useUtilityBinArraysToCalculateUpperBounds(transactionsPe, j, itemsToKeep, neighbourhoodList)[source]
+

A method to calculate the sub-tree utility and local utility of all items that can extend itemSet P U {e}

+
+
Parameters:
+

transactionsPe (list) – transactions of the projected database for P U {e}

+
+
+

:param j: the position of j in the list of promising items +:type j: int +:param itemsToKeep: the list of promising items +:type itemsToKeep: list +:param neighbourhoodList: list of neighbourhood elements +:type neighbourhoodList: list

+
+ +
+
+utilityBinArrayLU = {}
+
+ +
+
+utilityBinArraySU = {}
+
+ +
+
+ +
+
+class PAMI.highUtilitySpatialPattern.topk.TKSHUIM.Transaction(items, utilities, transactionUtility, pmus=None)[source]
+

Bases: object

+

A class to store a Transaction of a database

+
+
Attributes:
+
+
items: list

A list of items in transaction

+
+
utilities: list

A list of utilities of the items in the transaction

+
+
transactionUtility: int

represents the total utility of the transaction, i.e., the sum of the utilities of its items

+
+
pmus: list

represents the pmu (probable maximum utility) of each element in the transaction

+
+
prefixutility:

prefix utility value, i.e., the utility of the projected prefix

+
+
offset:

an offset pointer, used by projected transactions

+
+
+
+
Methods:
+
+
projectedTransaction(offsetE):

A method to create a new Transaction from the existing one, up to offsetE

+
+
getItems():

return items in transaction

+
+
getUtilities():

return utilities in transaction

+
+
getPmus():

return pmus in transaction

+
+
getLastPosition():

return last position in a transaction

+
+
removeUnpromisingItems():

A method to remove items whose utility is lower than minUtil

+
+
insertionSort():

A method to sort all items in the transaction

+
+
+
+
+
+
+getItems()[source]
+

A method to return items in transaction

+
+ +
+
+getLastPosition()[source]
+

A method to return last position in a transaction

+
+ +
+
+getPmus()[source]
+

A method to return pmus in transaction

+
+ +
+
+getUtilities()[source]
+

A method to return utilities in transaction

+
+ +
+
+insertionSort()[source]
+

A method to sort items in order

+
+ +
+
+offset = 0
+
+ +
+
+prefixUtility = 0
+
+ +
+
+projectTransaction(offsetE)[source]
+

A method to create a new Transaction from the existing one, up to offsetE

+
+
Parameters:
+

offsetE (int) – an offset over the original transaction for projecting the transaction

+
+
+
+ +
+
+removeUnpromisingItems(oldNamesToNewNames)[source]
+

A method to remove items whose utility is lower than minUtil

+
+
Parameters:
+

oldNamesToNewNames (map) – A map representing old names to new names

+
+
+
+ +
+ +
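The projection described by projectTransaction can be pictured as below: the new transaction keeps only the items after offsetE and carries forward the utility of the dropped prefix. Txn is a toy class for illustration; the real implementation reuses the arrays via an offset pointer instead of copying.

+class Txn:
+    def __init__(self, items, utilities, prefix_utility=0):
+        self.items, self.utilities = items, utilities
+        self.prefix_utility = prefix_utility
+
+    def project(self, offset_e):
+        # keep the suffix after offset_e; absorb the utility at offset_e
+        return Txn(self.items[offset_e + 1:],
+                   self.utilities[offset_e + 1:],
+                   self.prefix_utility + self.utilities[offset_e])
+
+t = Txn(['a', 'b', 'c'], [4, 3, 3])
+p = t.project(0)                      # project on item 'a'
+print(p.items, p.prefix_utility)      # ['b', 'c'] 4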
+
+PAMI.highUtilitySpatialPattern.topk.TKSHUIM.main()[source]
+
+ +
+
+

PAMI.highUtilitySpatialPattern.topk.abstract module

+
+
+class PAMI.highUtilitySpatialPattern.topk.abstract.utilityPatterns(iFile, nFile, k, sep='\t')[source]
+

Bases: ABC

+
+
Description:
+

This abstract base class defines the variables and methods that every topk spatial high utility pattern mining algorithm must +employ in PAMI

+
+
Attributes:
+
+
iFilestr

Input file name or path of the input file

+
+
k: integer

The user can specify k, the number of patterns to be discovered (top-k)

+
+
sepstr

This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. +However, the users can override the default separator

+
+
startTime:float

To record the start time of the algorithm

+
+
endTime:float

To record the completion time of the algorithm

+
+
finalPatterns: dict

Storing the complete set of patterns in a dictionary variable

+
+
oFilestr

Name of the output file to store complete set of frequent patterns

+
+
memoryUSSfloat

To store the total amount of USS memory consumed by the program

+
+
memoryRSSfloat

To store the total amount of RSS memory consumed by the program

+
+
+
+
Methods:
+
+
startMine()

Calling this function will start the actual mining process

+
+
getPatterns()

This function will output all interesting patterns discovered by an algorithm

+
+
save(oFile)

This function will store the discovered patterns in an output file specified by the user

+
+
getPatternsAsDataFrame()

The function outputs the patterns generated by an algorithm as a data frame

+
+
getMemoryUSS()

This function outputs the total amount of USS memory consumed by a mining algorithm

+
+
getMemoryRSS()

This function outputs the total amount of RSS memory consumed by a mining algorithm

+
+
getRuntime()

This function outputs the total runtime of a mining algorithm

+
+
+
+
+
+
+abstract endTime()[source]
+

Variable to store the end time of the complete program

+
+ +
+
+abstract finalPatterns()[source]
+

Variable to store the complete set of patterns in a dictionary

+
+ +
+
+abstract getMemoryRSS()[source]
+

Total amount of RSS memory consumed by the program will be retrieved from this function

+
+ +
+
+abstract getMemoryUSS()[source]
+

Total amount of USS memory consumed by the program will be retrieved from this function

+
+ +
+
+abstract getPatterns()[source]
+

Complete set of patterns generated will be retrieved from this function

+
+ +
+
+abstract getPatternsAsDataFrame()[source]
+

Complete set of generated patterns will be loaded in to data frame from this function

+
+ +
+
+abstract getRuntime()[source]
+

Total amount of runtime taken by the program will be retrieved from this function

+
+ +
+
+abstract iFile()[source]
+

Variable to store the input file path/file name

+
+ +
+
+abstract memoryRSS()[source]
+

Variable to store RSS memory consumed by the program

+
+ +
+
+abstract memoryUSS()[source]
+

Variable to store USS memory consumed by the program

+
+ +
+
+abstract nFile()[source]
+

Variable to store the neighbourhood file path/file name

+
+ +
+
+abstract oFile()[source]
+

Variable to store the name of the output file to store the complete set of frequent patterns

+
+ +
+
+abstract printResults()[source]
+

To print all the results of execution

+
+ +
+
+abstract save(oFile)[source]
+

Complete set of patterns will be saved in to an output file from this function

+
+
Parameters:
+

oFile (csv file) – Name of the output file

+
+
+
+ +
+
+abstract startMine()[source]
+

Code for the mining process will start from this function

+
+ +
+
+abstract startTime()[source]
+

Variable to store the start time of the mining process

+
+ +
+ +
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.html b/sphinx/_build/html/PAMI.html new file mode 100644 index 000000000..7f2292790 --- /dev/null +++ b/sphinx/_build/html/PAMI.html @@ -0,0 +1,1159 @@ + + + + + + + PAMI package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI package

+
+

Subpackages

+
+ +
+
+
+

Module contents

+

PAMI

+

A PAttern MIning python library.

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.localPeriodicPattern.basic.html b/sphinx/_build/html/PAMI.localPeriodicPattern.basic.html new file mode 100644 index 000000000..cbbcb253c --- /dev/null +++ b/sphinx/_build/html/PAMI.localPeriodicPattern.basic.html @@ -0,0 +1,1055 @@ + + + + + + + PAMI.localPeriodicPattern.basic package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.localPeriodicPattern.basic package

+
+

Submodules

+
+
+

PAMI.localPeriodicPattern.basic.LPPGrowth module

+
+
+class PAMI.localPeriodicPattern.basic.LPPGrowth.LPPGrowth(iFile, maxPer, maxSoPer, minDur, sep='\t')[source]
+

Bases: _localPeriodicPatterns

+
+
Description:
+

Local Periodic Patterns, which are patterns (sets of events) that have a periodic behavior in some non-predefined +time-intervals. A pattern is said to be a local periodic pattern if it appears regularly and continuously in some +time-intervals. The maxSoPer (maximal period of spillovers) measure allows detecting time-intervals of variable +lengths where a pattern is continuously periodic, while the minDur (minimal duration) measure ensures that those +time-intervals have a minimum duration.

+
+
Reference:
+

Fournier-Viger, P., Yang, P., Kiran, R. U., Ventura, S., Luna, J. M.. (2020). Mining Local Periodic Patterns in +a Discrete Sequence. Information Sciences, Elsevier, to appear. [ppt] DOI: 10.1016/j.ins.2020.09.044

+
+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine complete set of local periodic pattern’s

  • +
  • oFile – str : +Name of the output file to store complete set of local periodic patterns

  • +
  • minDur – str: +Minimal duration in seconds between consecutive periods of time-intervals where a pattern is continuously periodic.

  • +
  • maxPer – float: +Controls the maximum number of transactions in which any two items within a pattern can reappear.

  • +
  • maxSoPer – float: +Controls the maximum number of time periods between consecutive periods of time-intervals where a pattern is continuously periodic.

  • +
• sep – str : +This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override their default separator.

  • +
+
+
Attributes:
+
+
iFilestr

Input file name or path of the input file

+
+
oFilestr

Output file name or path of the output file

+
+
maxPerfloat

User defined maxPer value.

+
+
maxSoPerfloat

User defined maxSoPer value.

+
+
minDurfloat

User defined minDur value.

+
+
tsMinint / date

First time stamp of input data.

+
+
tsMaxint / date

Last time stamp of input data.

+
+
startTimefloat

Time at which execution of the algorithm started.

+
+
endTimefloat

Time at which execution of the algorithm ended.

+
+
finalPatternsdict

To store local periodic patterns and its PTL.

+
+
tsListdict

To store items and its time stamp as bit vector.

+
+
rootTree

It is root node of transaction tree of whole input data.

+
+
PTLdict

Storing the item and its PTL.

+
+
itemslist

Storing local periodic item list.

+
+
sep: str

separator used to distinguish items from each other. The default separator is tab space.

+
+
+
+
Methods:
+
+
findSeparator(line)

Find the separator of the line, which splits the strings.

+
+
creteLPPlist()

Create the local periodic patterns list from input data.

+
+
createTSList()

Create the tsList as bit vector from input data.

+
+
generateLPP()

Generate length-1 local periodic patterns from tsList and execute depth-first search.

+
+
createLPPTree()

Create LPPTree of local periodic item from input data.

+
+
patternGrowth(tree, prefix, prefixPFList)

Execute the pattern-growth algorithm. It is an important function in this program.

+
+
calculatePTL(tsList)

Calculate PTL from input tsList as integer list.

+
+
calculatePTLbit(tsList)

Calculate PTL from input tsList as bit vector.

+
+
mine()

Mining process will start from here.

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function.

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function.

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function.

+
+
getLocalPeriodicPatterns()

return local periodic patterns and its PTL

+
+
save(oFile)

Complete set of local periodic patterns will be loaded in to an output file.

+
+
getPatternsAsDataFrame()

Complete set of local periodic patterns will be loaded in to a dataframe.

+
+
+
+
+
+

Executing the code on terminal:

+
Format:
+
+(.venv) $ python3 LPPGrowth.py <inputFile> <outputFile> <maxPer> <maxSoPer> <minDur>
+
+Example Usage:
+
+(.venv) $ python3 LPPGrowth.py sampleDB.txt patterns.txt 0.3 0.4 0.5
+
+
+
+
+

Sample run of importing the code:

+
from PAMI.localPeriodicPattern.basic import LPPGrowth as alg
+
+obj = alg.LPPGrowth(iFile, maxPer, maxSoPer, minDur)
+
+obj.mine()
+
+localPeriodicPatterns = obj.getPatterns()
+
+print(f'Total number of local periodic patterns: {len(localPeriodicPatterns)}')
+
+obj.save(oFile)
+
+Df = obj.getPatternsAsDataFrame()
+
+memUSS = obj.getMemoryUSS()
+
+print(f'Total memory in USS: {memUSS}')
+
+memRSS = obj.getMemoryRSS()
+
+print(f'Total memory in RSS: {memRSS}')
+
+runtime = obj.getRuntime()
+
+print(f'Total execution time in seconds: {runtime}')
+
+
+
+
+

Credits:

+
+

The complete program was written by So Nakamura under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns() Dict[source]
+

Function to send the set of local periodic patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame() DataFrame[source]
+

Storing final local periodic patterns in a dataframe

+
+
Returns:
+

returning local periodic patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine() None[source]
+

Mining process starts from here.

+
+ +
+
+printResults() None[source]
+

This function is used to print the results

+
+ +
+
+save(outFile: str) None[source]
+

Complete set of local periodic patterns will be loaded in to an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
Returns:
+

None

+
+
+
+ +
+
+startMine() None[source]
+

Mining process starts from here.

+
+ +
+
+ +
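The interplay of maxPer, maxSoPer, and minDur that calculatePTL implements can be sketched as follows: walk the timestamps of a pattern, track the spillover soPer = max(0, soPer + gap - maxPer), close an interval when soPer exceeds maxSoPer, and keep intervals of duration at least minDur. This is a simplified reading of the paper's idea under the assumption of integer timestamps; calculate_ptl is a hypothetical function whose boundary handling may differ from PAMI's actual code.

+def calculate_ptl(timestamps, maxPer, maxSoPer, minDur):
+    ptl, start, soPer = [], None, 0
+    for prev, cur in zip(timestamps, timestamps[1:]):
+        if start is None:
+            start, soPer = prev, 0
+        soPer = max(0, soPer + (cur - prev) - maxPer)
+        if soPer > maxSoPer:                      # periodicity broken
+            if prev - start >= minDur:
+                ptl.append((start, prev))
+            start = None
+    if start is not None and timestamps[-1] - start >= minDur:
+        ptl.append((start, timestamps[-1]))
+    return ptl
+
+print(calculate_ptl([1, 2, 3, 10, 11, 12, 13], maxPer=2, maxSoPer=1, minDur=2))
+# [(1, 3), (10, 13)]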
+
+class PAMI.localPeriodicPattern.basic.LPPGrowth.Node[source]
+

Bases: object

+

A class used to represent the node of localPeriodicPatternTree

+
+
Attributes:
+
+
itemint

storing item of a node

+
+
parentnode

To maintain the parent of every node

+
+
childlist

To maintain the children of node

+
+
nodeLinknode

To maintain the next node of node

+
+
tidListset

To maintain timestamps of node

+
+
+
+
Methods:
+
+
getChild(itemName)

storing the children to their respective parent nodes

+
+
+
+
+
+
+getChild(item: int) Node[source]
+

This function is used to get child node from the parent node

+
+
Parameters:
+

item (int) – item of the parent node

+
+
Returns:
+

if the node has a child node for the item, then return it; if it does not, return []

+
+
Return type:
+

Node

+
+
+
+ +
+ +
+
+class PAMI.localPeriodicPattern.basic.LPPGrowth.Tree[source]
+

Bases: object

+

A class used to represent the frequentPatternGrowth tree structure

+
+
Attributes:
+
+
rootnode

Represents the root node of the tree

+
+
nodeLinksdictionary

storing last node of each item

+
+
firstNodeLinkdictionary

storing first node of each item

+
+
+
+
Methods:
+
+
addTransaction(transaction,timeStamp)

creating transaction as a branch in frequentPatternTree

+
+
fixNodeLinks(itemName, newNode)

add newNode link after last node of item

+
+
deleteNode(itemName)

delete all node of item

+
+
createPrefixTree(path,timeStampList)

create prefix tree by path

+
+
+
+
+
+
+addTransaction(transaction: List[int], tid: int) None[source]
+

add transaction into tree

+
+
Parameters:
+
    +
  • transaction (list) – it represents the one transaction in database

  • +
  • tid (list or int) – represents the timestamp of transaction

  • +
+
+
Returns:
+

None

+
+
+
+ +
+
+createPrefixTree(path: List[int], tidList: List[int]) None[source]
+

create prefix tree by path

+
+
Parameters:
+
    +
  • path (list) – it represents path to root from prefix node

  • +
  • tidList (list) – it represents tid of each item

  • +
+
+
Returns:
+

None

+
+
+
+ +
+
+deleteNode(item: int) None[source]
+

delete the node from tree

+
+
Parameters:
+

item (str) – it represents the item name of node

+
+
Returns:
+

None

+
+
+
+ +
+ +

Fix node links: add the newNode link after the last node of the item

+
+
Parameters:
+
    +
  • item (string) – it represents item name of newNode

  • +
  • newNode (Node) – it represents node which is added

  • +
+
+
Returns:
+

None

+
+
+
+ +
+ +
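A toy version of the tree construction described above: addTransaction grows one branch per transaction and records the timestamp on the branch's last node. The node links (nodeLinks, firstNodeLink) are omitted here; Node and Tree below are illustrative stand-ins, not PAMI's classes.

+class Node:
+    def __init__(self, item=None, parent=None):
+        self.item, self.parent = item, parent
+        self.child, self.tidList = {}, set()
+
+class Tree:
+    def __init__(self):
+        self.root = Node()
+
+    def addTransaction(self, transaction, tid):
+        cur = self.root
+        for item in transaction:
+            cur = cur.child.setdefault(item, Node(item, cur))
+        cur.tidList.add(tid)                 # timestamp lands on the last node
+
+tree = Tree()
+tree.addTransaction(['a', 'b'], 1)
+tree.addTransaction(['a', 'c'], 2)
+print(list(tree.root.child['a'].child))     # ['b', 'c']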
+
+

PAMI.localPeriodicPattern.basic.LPPMBreadth module

+
+
+class PAMI.localPeriodicPattern.basic.LPPMBreadth.LPPMBreadth(iFile, maxPer, maxSoPer, minDur, sep='\t')[source]
+

Bases: _localPeriodicPatterns

+
+
Description:
+

Local Periodic Patterns, which are patterns (sets of events) that have a periodic behavior in some non-predefined +time-intervals. A pattern is said to be a local periodic pattern if it appears regularly and continuously in some +time-intervals. The maxSoPer (maximal period of spillovers) measure allows detecting time-intervals of variable +lengths where a pattern is continuously periodic, while the minDur (minimal duration) measure ensures that those +time-intervals have a minimum duration.

+
+
Reference:
+

Fournier-Viger, P., Yang, P., Kiran, R. U., Ventura, S., Luna, J. M.. (2020). Mining Local Periodic Patterns in +a Discrete Sequence. Information Sciences, Elsevier, to appear. [ppt] DOI: 10.1016/j.ins.2020.09.044

+
+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine complete set of local periodic pattern’s

  • +
  • oFile – str : +Name of the output file to store complete set of local periodic patterns

  • +
  • minDur – str: +Minimal duration in seconds between consecutive periods of time-intervals where a pattern is continuously periodic.

  • +
  • maxPer – float: +Controls the maximum number of transactions in which any two items within a pattern can reappear.

  • +
  • maxSoPer – float: +Controls the maximum number of time periods between consecutive periods of time-intervals where a pattern is continuously periodic.

  • +
• sep – str : +This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override their default separator.

  • +
+
+
Attributes:
+
+
iFilestr

Input file name or path of the input file

+
+
oFilestr

Output file name or path of the output file

+
+
maxPerfloat

User defined maxPer value.

+
+
maxSoPerfloat

User defined maxSoPer value.

+
+
minDurfloat

User defined minDur value.

+
+
tsMinint / date

First time stamp of input data.

+
+
tsMaxint / date

Last time stamp of input data.

+
+
startTimefloat

Time at which execution of the algorithm started.

+
+
endTimefloat

Time at which execution of the algorithm ended.

+
+
finalPatternsdict

To store local periodic patterns and its PTL.

+
+
tsListdict

To store items and its time stamp as bit vector.

+
+
sep: str

separator used to distinguish items from each other. The default separator is tab space.

+
+
+
+
Methods:
+
+
createTSList()

Create the tsList as bit vector from input data.

+
+
generateLPP()

Generate length-1 local periodic patterns from tsList and execute depth-first search.

+
+
calculatePTL(tsList)

Calculate PTL from input tsList as bit vector

+
+
LPPMBreathSearch(extensionOfP)

Mining local periodic patterns using breadth first search.

+
+
mine()

Mining process will start from here.

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function.

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function.

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function.

+
+
getLocalPeriodicPatterns()

return local periodic patterns and its PTL

+
+
save(oFile)

Complete set of local periodic patterns will be loaded in to an output file.

+
+
getPatternsAsDataFrame()

Complete set of local periodic patterns will be loaded in to a dataframe.

+
+
+
+
+
+

Executing the code on terminal:

+
Format:
+
+(.venv) $ python3 LPPMBreadth.py <inputFile> <outputFile> <maxPer> <maxSoPer> <minDur>
+
+Example Usage:
+
+(.venv) $ python3 LPPMBreadth.py sampleDB.txt patterns.txt 0.3 0.4 0.5
+
+
+
+
+

Sample run of importing the code:

+
from PAMI.localPeriodicPattern.basic import LPPMBreadth as alg
+
+obj = alg.LPPMBreadth(iFile, maxPer, maxSoPer, minDur)
+
+obj.mine()
+
+localPeriodicPatterns = obj.getPatterns()
+
+print(f'Total number of local periodic patterns: {len(localPeriodicPatterns)}')
+
+obj.save(oFile)
+
+Df = obj.getPatternsAsDataFrame()
+
+memUSS = obj.getMemoryUSS()
+
+print(f'Total memory in USS: {memUSS}')
+
+memRSS = obj.getMemoryRSS()
+
+print(f'Total memory in RSS: {memRSS}')
+
+runtime = obj.getRuntime()
+
+print(f'Total execution time in seconds: {runtime}')
+
+
+
+
+

Credits:

+
+

The complete program was written by So Nakamura under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns() Dict[Tuple[str, ...] | str, Set[Tuple[int, int]]][source]
+

Function to send the set of local periodic patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame() DataFrame[source]
+

Storing final local periodic patterns in a dataframe

+
+
Returns:
+

returning local periodic patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine() None[source]
+

Mining process starts from here.

+
+ +
+
+printResults() None[source]
+

This function is used to print the results

+
+ +
+
+save(outFile: str) None[source]
+

Complete set of local periodic patterns will be loaded in to an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
Returns:
+

None

+
+
+
+ +
+
+startMine() None[source]
+

Mining process starts from here.

+
+ +
+
+ +
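The tsList bit vectors mentioned in the attributes above can be built as sketched below, with bit i of an item's integer set when the item occurs at timestamp i; create_ts_list is a hypothetical helper, and Python's arbitrary-precision ints stand in for the real bit vectors.

+def create_ts_list(database):
+    # database: iterable of (timestamp, transaction) pairs
+    ts_list = {}
+    for ts, transaction in database:
+        for item in transaction:
+            ts_list[item] = ts_list.get(item, 0) | (1 << ts)
+    return ts_list
+
+db = [(1, ['a', 'b']), (2, ['a']), (4, ['b'])]
+bits = create_ts_list(db)
+print(bin(bits['a']), bin(bits['b']))   # 0b110 0b10010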
+
+

PAMI.localPeriodicPattern.basic.LPPMDepth module

+
+
+class PAMI.localPeriodicPattern.basic.LPPMDepth.LPPMDepth(iFile, maxPer, maxSoPer, minDur, sep='\t')[source]
+

Bases: _localPeriodicPatterns

+
+
Description:
+

Local Periodic Patterns, which are patterns (sets of events) that have a periodic behavior in some non-predefined +time-intervals. A pattern is said to be a local periodic pattern if it appears regularly and continuously in some +time-intervals. The maxSoPer (maximal period of spillovers) measure allows detecting time-intervals of variable +lengths where a pattern is continuously periodic, while the minDur (minimal duration) measure ensures that those +time-intervals have a minimum duration.

+
+
Reference:
+

Fournier-Viger, P., Yang, P., Kiran, R. U., Ventura, S., Luna, J. M.. (2020). Mining Local Periodic Patterns in +a Discrete Sequence. Information Sciences, Elsevier, to appear. [ppt] DOI: 10.1016/j.ins.2020.09.044

+
+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine complete set of local periodic pattern’s

  • +
  • oFile – str : +Name of the output file to store complete set of local periodic patterns

  • +
  • minDur – str: +Minimal duration in seconds between consecutive periods of time-intervals where a pattern is continuously periodic.

  • +
  • maxPer – float: +Controls the maximum number of transactions in which any two items within a pattern can reappear.

  • +
  • maxSoPer – float: +Controls the maximum number of time periods between consecutive periods of time-intervals where a pattern is continuously periodic.

  • +
• sep – str : +This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override their default separator.

  • +
+
+
Attributes:
+
+
iFilestr

Input file name or path of the input file

+
+
oFilestr

Output file name or path of the output file

+
+
maxPerfloat

User defined maxPer value.

+
+
maxSoPerfloat

User defined maxSoPer value.

+
+
minDurfloat

User defined minDur value.

+
+
tsminint / date

First time stamp of input data.

+
+
tsmaxint / date

Last time stamp of input data.

+
+
startTimefloat

Time at which execution of the algorithm started.

+
+
endTimefloat

Time at which execution of the algorithm ended.

+
+
finalPatternsdict

To store local periodic patterns and its PTL.

+
+
tsListdict

To store items and its time stamp as bit vector.

+
+
sepstr

separator used to distinguish items from each other. The default separator is tab space.

+
+
+
+
Methods:
+
+
createTSlist()

Create the TSlist as bit vector from input data.

+
+
generateLPP()

Generate length-1 local periodic patterns from TSlist and execute depth-first search.

+
+
calculatePTL(tsList)

Calculate PTL from input tsList as bit vector

+
+
LPPMDepthSearch(extensionOfP)

Mining local periodic patterns using depth first search.

+
+
mine()

Mining process will start from here.

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function.

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function.

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function.

+
+
getLocalPeriodicPatterns()

return local periodic patterns and its PTL

+
+
save(oFile)

Complete set of local periodic patterns will be loaded in to an output file.

+
+
getPatternsAsDataFrame()

Complete set of local periodic patterns will be loaded in to a dataframe.

+
+
+
+
+
+

Executing the code on terminal:

+
Format:
+
+(.venv) $ python3 LPPMDepth.py <inputFile> <outputFile> <maxPer> <maxSoPer> <minDur>
+
+Example Usage:
+
+(.venv) $ python3 LPPMDepth.py sampleDB.txt patterns.txt 0.3 0.4 0.5
+
+
+
+
+

Sample run of importing the code:

+
from PAMI.localPeriodicPattern.basic import LPPMDepth as alg
+
+obj = alg.LPPMDepth(iFile, maxPer, maxSoPer, minDur)
+
+obj.mine()
+
+localPeriodicPatterns = obj.getPatterns()
+
+print(f'Total number of local periodic patterns: {len(localPeriodicPatterns)}')
+
+obj.save(oFile)
+
+Df = obj.getPatternsAsDataFrame()
+
+memUSS = obj.getMemoryUSS()
+
+print(f'Total memory in USS: {memUSS}')
+
+memRSS = obj.getMemoryRSS()
+
+print(f'Total memory in RSS: {memRSS}')
+
+runtime = obj.getRuntime()
+
+print(f'Total execution time in seconds: {runtime}')
+
+
+
+
+

Credits:

+
+

The complete program was written by So Nakamura under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns() Dict[Tuple[str, ...] | str, Set[Tuple[int, int]]][source]
+

Function to send the set of local periodic patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame() DataFrame[source]
+

Storing final local periodic patterns in a dataframe

+
+
Returns:
+

returning local periodic patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine() None[source]
+

Mining process starts from here. This function calls createTSlist and generateLPP.

+
+ +
+
+printResults() None[source]
+

This function is used to print the results

+
+ +
+
+save(outFile: str) None[source]
+

Complete set of local periodic patterns will be loaded in to an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
Returns:
+

None

+
+
+
+ +
+
+startMine() None[source]
+

Mining process starts from here. This function calls createTSlist and generateLPP.

+
+ +
+
+ +
+
+

PAMI.localPeriodicPattern.basic.abstract module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.localPeriodicPattern.html b/sphinx/_build/html/PAMI.localPeriodicPattern.html new file mode 100644 index 000000000..cd6330ba0 --- /dev/null +++ b/sphinx/_build/html/PAMI.localPeriodicPattern.html @@ -0,0 +1,235 @@ + + + + + + + PAMI.localPeriodicPattern package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ + +
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.multipleMinimumSupportBasedFrequentPattern.basic.html b/sphinx/_build/html/PAMI.multipleMinimumSupportBasedFrequentPattern.basic.html new file mode 100644 index 000000000..30a97bd15 --- /dev/null +++ b/sphinx/_build/html/PAMI.multipleMinimumSupportBasedFrequentPattern.basic.html @@ -0,0 +1,647 @@ + + + + + + + PAMI.multipleMinimumSupportBasedFrequentPattern.basic package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.multipleMinimumSupportBasedFrequentPattern.basic package

+
+

Submodules

+
+
+

PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowth module

+
+
+class PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowth.CFPGrowth(iFile, MIS, sep='\t')[source]
+

Bases: _frequentPatterns

+
+
Description:
+

CFPGrowth is one of the fundamental algorithms to discover frequent patterns based on multiple minimum supports in a transactional database.

+
+
Reference:
+

Ya-Han Hu and Yen-Liang Chen. 2006. Mining association rules with multiple minimum supports: a new mining algorithm and a support tuning mechanism. +Decis. Support Syst. 42, 1 (October 2006), 1–24. https://doi.org/10.1016/j.dss.2004.09.007

+
+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine the complete set of Multiple Minimum Support Based Frequent patterns

  • +
  • oFile – str : +Name of the output file to store the complete set of Multiple Minimum Support Based Frequent patterns

  • +
  • MIS – str or dict : +Multiple minimum support values of the items, specified either as a file or as a dictionary

  • +
  • sep – str : +This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator.

  • +
+
+
Attributes:
+
+
iFilefile

Input file name or path of the input file

+
+
MIS: file or dictionary

Multiple minimum supports of all items in the database

+
+
sepstr

This variable is used to distinguish items from one another in a transaction. The default separator is tab space ('\t'). +However, the users can override the default separator.

+
+
oFilefile

Name of the output file or the path of the output file

+
+
startTime:float

To record the start time of the mining process

+
+
endTime:float

To record the completion time of the mining process

+
+
memoryUSSfloat

To store the total amount of USS memory consumed by the program

+
+
memoryRSSfloat

To store the total amount of RSS memory consumed by the program

+
+
Databaselist

To store the transactions of a database in list

+
+
mapSupportDictionary

To maintain the information of item and their frequency

+
+
lnoint

it represents the total number of transactions

+
+
treeclass

it represents the Tree class

+
+
finalPatternsdict

it is used to store the discovered patterns

+
+
+
+
Methods:
+
+
startMine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of frequent patterns will be loaded in to an output file

+
+
getPatternsAsDataFrame()

Complete set of frequent patterns will be loaded in to a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
creatingItemSets()

Scans the dataset or dataframes and stores in list format

+
+
frequentOneItem()

Extracts the one-frequent patterns from transactions

+
+
+
+
+
+

Executing the code on terminal:

+
 Format:
+
+(.venv) $ python3 CFPGrowth.py <inputFile> <outputFile> <MISFile>
+
+Examples:
+
+(.venv) $  python3 CFPGrowth.py sampleDB.txt patterns.txt MISFile.txt
+
+
+Note: minSup will be considered in support count or frequency
+
+
+
+
+

Sample run of importing the code:

+
from PAMI.multipleMinimumSupportBasedFrequentPattern.basic import CFPGrowth as alg
+
+obj = alg.CFPGrowth(iFile, MIS)
+
+obj.startMine()
+
+frequentPatterns = obj.getPatterns()
+
+print("Total number of Frequent Patterns:", len(frequentPatterns))
+
+obj.save(oFile)
+
+Df = obj.getPatternsAsDataFrame()
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+
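Since the MIS attribute accepts either a file or a dictionary, the multiple minimum supports can also be prepared in code. A minimal sketch, assuming per-item support counts (the item names and values below are illustrative, not from the library's documentation):

from PAMI.multipleMinimumSupportBasedFrequentPattern.basic import CFPGrowth as alg

# assumed MIS dictionary: one minimum support value per item
MIS = {'a': 4, 'b': 4, 'c': 3, 'd': 2}

obj = alg.CFPGrowth('sampleDB.txt', MIS)
obj.startMine()
print('Total number of Frequent Patterns:', len(obj.getPatterns()))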

Credits:

+
+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+
+Mine() None[source]
+

Main program to start the operation. +Returns: None

+
+ +
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns() Dict[str, int][source]
+

Function to send the set of frequent patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame() DataFrame[source]
+

Storing final frequent patterns in a dataframe

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+printResults() None[source]
+

This function is used to print the results. +Returns: None

+
+ +
+
+save(outFile: str) None[source]
+

Complete set of frequent patterns will be loaded in to an output file

+
+
Parameters:
+

outFile (file) – name of the output file

+
+
Returns:
+

None

+
+
+
+ +
+
+startMine() None[source]
+

Main program to start the operation. +Returns: None

+
+ +
+
+ +
+
+

PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowthPlus module

+
+
+class PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowthPlus.CFPGrowthPlus(iFile, MIS, sep='\t')[source]
+

Bases: _frequentPatterns

+
+
Description:
+

CFPGrowthPlus is a fundamental algorithm to discover frequent patterns based on multiple minimum supports in a transactional database. It extends CFPGrowth with novel techniques to reduce the search space.

+
Reference:
+

R. Uday Kiran and P. Krishna Reddy. Novel techniques to reduce search space in multiple minimum supports-based frequent +pattern mining algorithms. EDBT 2011, 11-20. https://doi.org/10.1145/1951365.1951370

+
+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine the complete set of Multiple Minimum Support Based Frequent patterns

  • +
  • oFile – str : +Name of the output file to store the complete set of Multiple Minimum Support Based Frequent patterns

  • +
  • MIS – str or dict : +Multiple minimum support values of the items, specified either as a file or as a dictionary

  • +
  • sep – str : +This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator.

  • +
+
+
Attributes:
+
+
iFilefile

Input file name or path of the input file

+
+
MIS: file or dictionary

Multiple minimum supports of all items in the database

+
+
sepstr

This variable is used to distinguish items from one another in a transaction. The default separator is tab space ('\t'). +However, the users can override the default separator.

+
+
oFilefile

Name of the output file or the path of the output file

+
+
startTime:float

To record the start time of the mining process

+
+
endTime:float

To record the completion time of the mining process

+
+
memoryUSSfloat

To store the total amount of USS memory consumed by the program

+
+
memoryRSSfloat

To store the total amount of RSS memory consumed by the program

+
+
Databaselist

To store the transactions of a database in list

+
+
mapSupportDictionary

To maintain the information of item and their frequency

+
+
lnoint

it represents the total number of transactions

+
+
treeclass

it represents the Tree class

+
+
finalPatternsdict

it is used to store the discovered patterns

+
+
+
+
Methods:
+
+
startMine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of frequent patterns will be loaded in to an output file

+
+
getPatternsAsDataFrame()

Complete set of frequent patterns will be loaded in to a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
creatingItemSets()

Scans the dataset or dataframes and stores in list format

+
+
frequentOneItem()

Extracts the one-frequent patterns from transactions

+
+
+
+
+
+

Executing the code on terminal:

+
+
Format:
+
+(.venv) $ python3 CFPGrowthPlus.py <inputFile> <outputFile> <MISFile>
+
+Examples:
+
+(.venv) $ python3 CFPGrowthPlus.py sampleDB.txt patterns.txt MISFile.txt
+
+
+Note: minSup will be considered in support count or frequency
+
+
+
+
+
+

Sample run of importing the code:

+
from PAMI.multipleMinimumSupportBasedFrequentPattern.basic import CFPGrowthPlus as alg
+
+obj = alg.CFPGrowthPlus(iFile, MIS)
+
+obj.startMine()
+
+frequentPatterns = obj.getPatterns()
+
+print("Total number of Frequent Patterns:", len(frequentPatterns))
+
+obj.save(oFile)
+
+Df = obj.getPatternsAsDataFrame()
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+

Credits:

+
+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+
+Mine()[source]
+

main program to start the operation

+
+ +
+
+getMemoryRSS()[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS()[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns()[source]
+

Function to send the set of frequent patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame()[source]
+

Storing final frequent patterns in a dataframe

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime()[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+printResults() None[source]
+

This function is used to print the results. +Returns: None

+
+ +
+
+save(outFile)[source]
+

Complete set of frequent patterns will be loaded in to an output file

+
+
Parameters:
+

outFile (file) – name of the output file

+
+
+
+ +
+
+startMine()[source]
+

main program to start the operation

+
+ +
+
+ +
+
+

PAMI.multipleMinimumSupportBasedFrequentPattern.basic.abstract module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.multipleMinimumSupportBasedFrequentPattern.html b/sphinx/_build/html/PAMI.multipleMinimumSupportBasedFrequentPattern.html new file mode 100644 index 000000000..cdabb971f --- /dev/null +++ b/sphinx/_build/html/PAMI.multipleMinimumSupportBasedFrequentPattern.html @@ -0,0 +1,209 @@ + + + + + + + PAMI.multipleMinimumSupportBasedFrequentPattern package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ + +
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.partialPeriodicFrequentPattern.basic.html b/sphinx/_build/html/PAMI.partialPeriodicFrequentPattern.basic.html new file mode 100644 index 000000000..7525ba50a --- /dev/null +++ b/sphinx/_build/html/PAMI.partialPeriodicFrequentPattern.basic.html @@ -0,0 +1,280 @@ + + + + + + + PAMI.partialPeriodicFrequentPattern.basic package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.partialPeriodicFrequentPattern.basic package

+
+

Submodules

+
+
+

PAMI.partialPeriodicFrequentPattern.basic.GPFgrowth module

+
+
+

PAMI.partialPeriodicFrequentPattern.basic.PPF_DFS module

+
+
+

PAMI.partialPeriodicFrequentPattern.basic.abstract module

+
+
+class PAMI.partialPeriodicFrequentPattern.basic.abstract.partialPeriodicPatterns(iFile, minSup, maxPer, minPR, sep='\t')[source]
+

Bases: ABC

+
+
Description:
+

This abstract base class defines the variables and methods that every partial periodic pattern mining algorithm must +employ in PAMI

+
+
Attributes:
+
+
iFilestr

Input file name or path of the input file

+
+
minSup: float

User-specified minimum support value. It has to be given in terms of count of the total number of transactions +in the input database/file

+
+
startTime:float

To record the start time of the algorithm

+
+
endTime:float

To record the completion time of the algorithm

+
+
finalPatterns: dict

Storing the complete set of patterns in a dictionary variable

+
+
oFilestr

Name of the output file to store complete set of frequent patterns

+
+
memoryUSSfloat

To store the total amount of USS memory consumed by the program

+
+
memoryRSSfloat

To store the total amount of RSS memory consumed by the program

+
+
+
+
Methods:
+
+
startMine()

Mining process will start from here

+
+
getFrequentPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of frequent patterns will be loaded in to an output file

+
+
getPatternsAsDataFrame()

Complete set of frequent patterns will be loaded in to data frame

+
+
getMemoryUSS()

Total amount of USS memory consumed by the program will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the program will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the program will be retrieved from this function

+
+
+
+
+
+
+abstract getMemoryRSS()[source]
+

Total amount of RSS memory consumed by the program will be retrieved from this function

+
+ +
+
+abstract getMemoryUSS()[source]
+

Total amount of USS memory consumed by the program will be retrieved from this function

+
+ +
+
+abstract getPatterns()[source]
+

Complete set of frequent patterns generated will be retrieved from this function

+
+ +
+
+abstract getPatternsAsDataFrame()[source]
+

Complete set of frequent patterns will be loaded in to data frame from this function

+
+ +
+
+abstract getRuntime()[source]
+

Total amount of runtime taken by the program will be retrieved from this function

+
+ +
+
+abstract printResults()[source]
+

To print all the results of execution.

+
+ +
+
+abstract save(oFile)[source]
+

Complete set of frequent patterns will be saved in to an output file from this function. +Parameters: oFile (csv file) – Name of the output file

+
+ +
+
+abstract startMine()[source]
+

Code for the mining process will start from this function

+
+ +
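Because every partial periodic pattern mining algorithm in PAMI is expected to implement the abstract methods above, a new algorithm can be added by subclassing this base class. The skeleton below is a minimal sketch only (the class name and method bodies are hypothetical placeholders, not a PAMI algorithm):

from PAMI.partialPeriodicFrequentPattern.basic.abstract import partialPeriodicPatterns

class DemoMiner(partialPeriodicPatterns):
    # hypothetical miner that merely satisfies the abstract interface

    def startMine(self):
        # real mining logic would populate this dictionary
        self.finalPatterns = {}

    def getPatterns(self):
        return self.finalPatterns

    def getPatternsAsDataFrame(self):
        import pandas as pd  # PAMI returns pd.DataFrame from this method
        # column names are illustrative
        return pd.DataFrame(list(self.finalPatterns.items()), columns=['Patterns', 'periodicRatio'])

    def getMemoryUSS(self):
        return 0.0  # placeholder

    def getMemoryRSS(self):
        return 0.0  # placeholder

    def getRuntime(self):
        return 0.0  # placeholder

    def printResults(self):
        print('Total number of patterns:', len(self.finalPatterns))

    def save(self, oFile):
        with open(oFile, 'w') as f:
            for pattern, value in self.finalPatterns.items():
                f.write(f'{pattern}:{value}\n')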
+ +
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.partialPeriodicFrequentPattern.html b/sphinx/_build/html/PAMI.partialPeriodicFrequentPattern.html new file mode 100644 index 000000000..1e6a538e6 --- /dev/null +++ b/sphinx/_build/html/PAMI.partialPeriodicFrequentPattern.html @@ -0,0 +1,194 @@ + + + + + + + PAMI.partialPeriodicFrequentPattern package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ + +
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.partialPeriodicPattern.basic.html b/sphinx/_build/html/PAMI.partialPeriodicPattern.basic.html new file mode 100644 index 000000000..184d4234d --- /dev/null +++ b/sphinx/_build/html/PAMI.partialPeriodicPattern.basic.html @@ -0,0 +1,673 @@ + + + + + + + PAMI.partialPeriodicPattern.basic package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.partialPeriodicPattern.basic package

+
+

Submodules

+
+
+

PAMI.partialPeriodicPattern.basic.GThreePGrowth module

+
+
+

PAMI.partialPeriodicPattern.basic.Gabstract module

+
+
+

PAMI.partialPeriodicPattern.basic.PPPGrowth module

+
+
+class PAMI.partialPeriodicPattern.basic.PPPGrowth.PPPGrowth(iFile, minPS, period, sep='\t')[source]
+

Bases: _partialPeriodicPatterns

+
+
Description:
+

3PGrowth is a fundamental approach to mine the partial periodic patterns in a temporal database.

+
+
Reference:
+

Discovering Partial Periodic Itemsets in Temporal Databases. SSDBM ‘17: Proceedings of the 29th International Conference on Scientific and Statistical Database Management, June 2017, +Article No. 30, Pages 1–6. https://doi.org/10.1145/3085504.3085535

+
+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine the complete set of frequent patterns

  • +
  • oFile – str : +Name of the output file to store complete set of frequent patterns

  • +
  • minPS – float: +Minimum partial periodic pattern…

  • +
  • period – float: +Minimum partial periodic…

  • +
  • sep – str : +This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator.

  • +
+
+
Attributes:
+
+
iFilefile

Name of the Input file or path of the input file

+
+
oFilefile

Name of the output file or path of the output file

+
+
minPS: float or int or str

The user can specify minPS either in count or proportion of database size. +If the program detects the data type of minPS is integer, then it treats minPS as expressed in count. +Otherwise, it will be treated as float. +Example: minPS=10 will be treated as integer, while minPS=10.0 will be treated as float (a count-vs-proportion sketch follows the terminal examples below)

+
+
period: float or int or str

The user can specify period either in count or proportion of database size. +If the program detects the data type of period is integer, then it treats period as expressed in count. +Otherwise, it will be treated as float. +Example: period=10 will be treated as integer, while period=10.0 will be treated as float

+
+
sepstr

This variable is used to distinguish items from one another in a transaction. The default separator is tab space ('\t'). +However, the users can override the default separator.

+
+
memoryUSSfloat

To store the total amount of USS memory consumed by the program

+
+
memoryRSSfloat

To store the total amount of RSS memory consumed by the program

+
+
startTime:float

To record the start time of the mining process

+
+
endTime:float

To record the completion time of the mining process

+
+
Databaselist

To store the transactions of a database in list

+
+
mapSupportDictionary

To maintain the information of item and their frequency

+
+
lnoint

it represents the total number of transactions

+
+
treeclass

it represents the Tree class

+
+
finalPatternsdict

it is used to store the discovered patterns

+
+
+
+
Methods:
+
+
startMine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of frequent patterns will be loaded in to an output file

+
+
getPatternsAsDataFrame()

Complete set of frequent patterns will be loaded in to a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
creatingItemSets()

Scans the dataset or dataframes and stores in list format

+
+
partialPeriodicOneItem()

Extracts the one-frequent patterns from transactions

+
+
updateTransactions()

updates the transactions by removing the aperiodic items and sorting the items in each transaction +in decreasing order of support

+
+
buildTree()

constructs the main tree by setting the root node as null

+
+
startMine()

main program to mine the partial periodic patterns

+
+
+
+
+
+

Executing the code on terminal:

+
+
Format:
+
+(.venv) $ python3 PPPGrowth.py <inputFile> <outputFile> <minPS> <period>
+
+Examples:
+
+(.venv) $ python3 PPPGrowth.py sampleDB.txt patterns.txt 10.0 2.0
+
+
+
+
+
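Because minPS and period are treated as counts when given as integers and as proportions of the database size when given as floats (see the attribute descriptions above), both conventions can be used when instantiating the miner. A minimal sketch with illustrative file name and threshold values:

from PAMI.partialPeriodicPattern.basic import PPPGrowth as alg

# count convention: integer thresholds are absolute counts
objByCount = alg.PPPGrowth('sampleDB.txt', minPS=10, period=2)

# proportion convention: float thresholds are fractions of the database size
objByRatio = alg.PPPGrowth('sampleDB.txt', minPS=0.1, period=0.02)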
+

Sample run of importing the code:

+
from PAMI.partialPeriodicPattern.basic import PPPGrowth as alg

obj = alg.PPPGrowth(iFile, minPS, period)

obj.startMine()

partialPeriodicPatterns = obj.getPatterns()

print("Total number of partial periodic Patterns:", len(partialPeriodicPatterns))

obj.save(oFile)

Df = obj.getPatternsAsDataFrame()

memUSS = obj.getMemoryUSS()

print("Total Memory in USS:", memUSS)

memRSS = obj.getMemoryRSS()

print("Total Memory in RSS", memRSS)

run = obj.getRuntime()

print("Total ExecutionTime in seconds:", run)
+
+
+
+
+

Credits:

+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns() Dict[str, int][source]
+

Function to send the set of frequent patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame() DataFrame[source]
+

Storing final frequent patterns in a dataframe

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine() None[source]
+

Main method where the patterns are mined by constructing the tree. +Returns: None

+
+ +
+
+printResults() None[source]
+

This function is used to print the results. +Returns: None

+
+ +
+
+save(outFile: str) None[source]
+

Complete set of frequent patterns will be loaded in to an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
Returns:
+

None

+
+
+
+ +
+
+startMine() None[source]
+

Main method where the patterns are mined by constructing the tree. +Returns: None

+
+ +
+
+ +
+
+

PAMI.partialPeriodicPattern.basic.PPP_ECLAT module

+
+
+class PAMI.partialPeriodicPattern.basic.PPP_ECLAT.PPP_ECLAT(iFile, minPS, period, sep='\t')[source]
+

Bases: _partialPeriodicPatterns

+
+
Description:
+

3PEclat is a fundamental approach to mine the partial periodic-frequent patterns.

+
+
Reference:
+

R. Uday Kiran, J. N. Venkatesh, Masashi Toyoda, Masaru Kitsuregawa, P. Krishna Reddy. Discovering partial periodic-frequent patterns in a transactional database. +https://www.tkl.iis.u-tokyo.ac.jp/new/uploads/publication_file/file/774/JSS_2017.pdf

+
+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine the complete set of frequent patterns

  • +
  • oFile – str : +Name of the output file to store complete set of frequent patterns

  • +
  • minPS – float: +Minimum partial periodic pattern…

  • +
  • period – float: +Minimum partial periodic…

  • +
  • sep – str : +This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator.

  • +
+
+
Attributes:
+
+
self.iFilefile

Name of the Input file or path of the input file

+
+
self. oFilefile

Name of the output file or path of the output file

+
+
minPS: float or int or str

The user can specify minPS either in count or proportion of database size. +If the program detects the data type of minPS is integer, then it treats minPS is expressed in count. +Otherwise, it will be treated as float. +Example: minPS=10 will be treated as integer, while minPS=10.0 will be treated as float

+
+
period: float or int or str

The user can specify period either in count or proportion of database size. +If the program detects the data type of period is integer, then it treats period is expressed in count. +Otherwise, it will be treated as float. +Example: period=10 will be treated as integer, while period=10.0 will be treated as float

+
+
sepstr

This variable is used to distinguish items from one another in a transaction. The default separator is tab space ('\t'). +However, the users can override the default separator.

+
+
memoryUSSfloat

To store the total amount of USS memory consumed by the program

+
+
memoryRSSfloat

To store the total amount of RSS memory consumed by the program

+
+
startTime:float

To record the start time of the mining process

+
+
endTime:float

To record the completion time of the mining process

+
+
Databaselist

To store the transactions of a database in list

+
+
mapSupportDictionary

To maintain the information of item and their frequency

+
+
lnoint

it represents the total number of transactions

+
+
treeclass

it represents the Tree class

+
+
finalPatternsdict

it is used to store the discovered patterns

+
+
tidListdict

stores the timestamps of an item

+
+
+
+
Methods:
+
+
startMine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of frequent patterns will be loaded in to an output file

+
+
getPatternsAsDataFrame()

Complete set of frequent patterns will be loaded in to a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
creatingOneitemSets()

Scan the database and store the items with their timestamps which are periodic frequent

+
+
getPeriodAndSupport()

Calculates the support and period for a list of timestamps.

+
+
Generation()

Used to implement the prefix-class equivalence method to generate the periodic patterns recursively (a tid-list intersection sketch follows this list)

+
+
+
+
+
+
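Eclat-style miners of this family associate each pattern with the list of timestamps in which it occurs, and compute a candidate's timestamp list by intersecting the lists of its generators (see creatingOneitemSets() and Generation() above). A minimal sketch of that mechanic only, with illustrative values, not the library's internal code:

# timestamp (tid) lists of two items
tidA = [1, 3, 5, 7, 9, 11]
tidB = [3, 4, 5, 9, 10, 11]

# the candidate pattern {A, B} occurs exactly at the common timestamps
tidAB = sorted(set(tidA) & set(tidB))
print(tidAB)  # [3, 5, 9, 11]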

Executing the code on terminal:

+
+
Format:
+
+(.venv) $ python3 PPP_ECLAT.py <inputFile> <outputFile> <minPS> <period>
+
+Examples:
+
+(.venv) $ python3 PPP_ECLAT.py sampleDB.txt patterns.txt 0.3 0.4
+
+
+
+
+
+

Sample run of importing the code:

+

from PAMI.partialPeriodicPattern.basic import PPP_ECLAT as alg

obj = alg.PPP_ECLAT(iFile, minPS, period)

obj.startMine()

Patterns = obj.getPatterns()

print("Total number of partial periodic patterns:", len(Patterns))

obj.save(oFile)

Df = obj.getPatternsAsDataFrame()

memUSS = obj.getMemoryUSS()

print("Total Memory in USS:", memUSS)

memRSS = obj.getMemoryRSS()

print("Total Memory in RSS", memRSS)

run = obj.getRuntime()

print("Total ExecutionTime in seconds:", run)

+
+
+
+

Credits:

+

The complete program was written by P.RaviKumar under the supervision of Professor Rage Uday Kiran.

+
+
+Mine() None[source]
+

Main program starts by extracting the periodic-frequent items from the database and +performs prefix equivalence to form the combinations and generate partial-periodic patterns. +Returns: None

+
+ +
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns() Dict[str, int][source]
+

Function to send the set of frequent patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame() DataFrame[source]
+

Storing final frequent patterns in a dataframe

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+printResults() None[source]
+

This function is used to print the results. +Returns: None

+
+ +
+
+save(outFile: str) None[source]
+

Complete set of frequent patterns will be loaded in to an output file

+
+
Parameters:
+

outFile (file) – name of the output file

+
+
Returns:
+

None

+
+
+
+ +
+
+startMine() None[source]
+

Main program starts by extracting the periodic-frequent items from the database and +performs prefix equivalence to form the combinations and generate partial-periodic patterns. +Returns: None

+
+ +
+
+ +
+
+

PAMI.partialPeriodicPattern.basic.abstract module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.partialPeriodicPattern.closed.html b/sphinx/_build/html/PAMI.partialPeriodicPattern.closed.html new file mode 100644 index 000000000..17ce06bdf --- /dev/null +++ b/sphinx/_build/html/PAMI.partialPeriodicPattern.closed.html @@ -0,0 +1,410 @@ + + + + + + + PAMI.partialPeriodicPattern.closed package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.partialPeriodicPattern.closed package

+
+

Submodules

+
+
+

PAMI.partialPeriodicPattern.closed.PPPClose module

+
+
+class PAMI.partialPeriodicPattern.closed.PPPClose.PPPClose(iFile, periodicSupport, period, sep='\t')[source]
+

Bases: _partialPeriodicPatterns

+
+
Description:
+

+
+

PPPClose algorithm is used to discover the closed partial periodic patterns in temporal databases. +It uses depth-first search.

+
+
Reference:
+

R. Uday Kiran, J. N. Venkatesh, Philippe Fournier-Viger, Masashi Toyoda, P. Krishna Reddy and Masaru Kitsuregawa. +https://www.tkl.iis.u-tokyo.ac.jp/new/uploads/publication_file/file/799/PAKDD.pdf

+
+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine the complete set of closed partial periodic patterns

  • +
  • oFile – str : +Name of the output file to store the complete set of closed partial periodic patterns

  • +
  • periodicSupport – float: +Minimum partial periodic…

  • +
  • period – float: +Minimum partial periodic…

  • +
  • sep – str : +This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator.

  • +
+
+
Attributes:
+
+
iFilestr

Input file name or path of the input file

+
+
oFilestr

Name of the output file or path of the input file

+
+
periodicSupport: int or float or str

The user can specify periodicSupport either in count or proportion of database size. +If the program detects the data type of periodicSupport is integer, then it treats periodicSupport is expressed in count. +Otherwise, it will be treated as float. +Example: periodicSupport=10 will be treated as integer, while periodicSupport=10.0 will be treated as float

+
+
period: int or float or str

The user can specify period either in count or proportion of database size. +If the program detects the data type of period is integer, then it treats period is expressed in count. +Otherwise, it will be treated as float. +Example: period=10 will be treated as integer, while period=10.0 will be treated as float

+
+
sepstr

This variable is used to distinguish items from one another in a transaction. The default separator is tab space ('\t'). +However, the users can override the default separator.

+
+
startTime:float

To record the start time of the mining process

+
+
endTime:float

To record the completion time of the mining process

+
+
finalPatterns: dict

Storing the complete set of patterns in a dictionary variable

+
+
memoryUSSfloat

To store the total amount of USS memory consumed by the program

+
+
memoryRSSfloat

To store the total amount of RSS memory consumed by the program

+
+
+
+
Methods:
+
+
startMine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of frequent patterns will be loaded in to an output file

+
+
getPatternsAsDataFrame()

Complete set of frequent patterns will be loaded in to a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
+
+
+
+

Executing the code on terminal:

+
Format:
+
+(.venv) $ python3 PPPClose.py <inputFile> <outputFile> <periodicSupport> <period>
+
+Examples:
+
+(.venv) $ python3 PPPClose.py sampleTDB.txt patterns.txt 0.3 0.4
+
+
+
+
+

Sample run of importing the code:

+
from PAMI.partialPeriodicPattern.closed import PPPClose as alg
+
+obj = alg.PPPClose("../basic/sampleTDB.txt", "2", "6")
+
+obj.startMine()
+
+periodicFrequentPatterns = obj.getPatterns()
+
+print("Total number of Frequent Patterns:", len(periodicFrequentPatterns))
+
+obj.save("patterns")
+
+Df = obj.getPatternsAsDataFrame()
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+
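For intuition, a pattern is closed when no proper superset has the same periodic-support. A minimal post-hoc sketch over an ordinary patterns dictionary (the patterns and values are illustrative, not the algorithm's internal machinery):

# illustrative dictionary: pattern -> periodic-support
patterns = {('a',): 5, ('b',): 4, ('a', 'b'): 4, ('a', 'b', 'c'): 2}

closed = {
    p: s for p, s in patterns.items()
    if not any(set(p) < set(q) and s == t for q, t in patterns.items())
}
print(closed)  # {('a',): 5, ('a', 'b'): 4, ('a', 'b', 'c'): 2}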

Credits:

+
+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS()[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS()[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns()[source]
+

Function to send the set of frequent patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame()[source]
+

Storing final frequent patterns in a dataframe

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime()[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine()[source]
+

Mining process will start from here

+
+ +
+
+printResults()[source]
+

To print all the results of execution

+
+ +
+
+save(outFile)[source]
+

Complete set of frequent patterns will be loaded in to an output file

+
+
Parameters:
+

outFile (file) – name of the output file

+
+
+
+ +
+
+startMine()[source]
+

Mining process will start from here

+
+ +
+
+ +
+
+

PAMI.partialPeriodicPattern.closed.abstract module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.partialPeriodicPattern.html b/sphinx/_build/html/PAMI.partialPeriodicPattern.html new file mode 100644 index 000000000..fde41b686 --- /dev/null +++ b/sphinx/_build/html/PAMI.partialPeriodicPattern.html @@ -0,0 +1,280 @@ + + + + + + + PAMI.partialPeriodicPattern package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.partialPeriodicPattern package

+
+

Subpackages

+
+ +
+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.partialPeriodicPattern.maximal.html b/sphinx/_build/html/PAMI.partialPeriodicPattern.maximal.html new file mode 100644 index 000000000..28dbc2b4d --- /dev/null +++ b/sphinx/_build/html/PAMI.partialPeriodicPattern.maximal.html @@ -0,0 +1,176 @@ + + + + + + + PAMI.partialPeriodicPattern.maximal package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.partialPeriodicPattern.maximal package

+
+

Submodules

+
+
+

PAMI.partialPeriodicPattern.maximal.Max3PGrowth module

+
+
+

PAMI.partialPeriodicPattern.maximal.abstract module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.partialPeriodicPattern.pyspark.html b/sphinx/_build/html/PAMI.partialPeriodicPattern.pyspark.html new file mode 100644 index 000000000..58a4a876d --- /dev/null +++ b/sphinx/_build/html/PAMI.partialPeriodicPattern.pyspark.html @@ -0,0 +1,176 @@ + + + + + + + PAMI.partialPeriodicPattern.pyspark package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.partialPeriodicPattern.pyspark package

+
+

Submodules

+
+
+

PAMI.partialPeriodicPattern.pyspark.abstract module

+
+
+

PAMI.partialPeriodicPattern.pyspark.parallel3PGrowth module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.partialPeriodicPattern.topk.html b/sphinx/_build/html/PAMI.partialPeriodicPattern.topk.html new file mode 100644 index 000000000..9625930c7 --- /dev/null +++ b/sphinx/_build/html/PAMI.partialPeriodicPattern.topk.html @@ -0,0 +1,508 @@ + + + + + + + PAMI.partialPeriodicPattern.topk package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.partialPeriodicPattern.topk package

+
+

Submodules

+
+
+

PAMI.partialPeriodicPattern.topk.abstract module

+
+
+class PAMI.partialPeriodicPattern.topk.abstract.partialPeriodicPatterns(iFile, k, period, sep='\t')[source]
+

Bases: ABC

+
+
Description:
+

This abstract base class defines the variables and methods that every periodic-frequent pattern mining algorithm must employ in PAMI

+
+
Attributes:
+
+
iFilestr

Input file name or path of the input file

+
+
k: int

User-specified count of top partial periodic patterns to be discovered

+
+
period: int or float or str

The user can specify period either in count or proportion of database size. +If the program detects the data type of period is integer, then it treats period is expressed in count. +Otherwise, it will be treated as float. +Example: period=10 will be treated as integer, while period=10.0 will be treated as float

+
+
sepstr

This variable is used to distinguish items from one another in a transaction. The default separator is tab space ('\t'). +However, the users can override the default separator.

+
+
startTime:float

To record the start time of the algorithm

+
+
endTime:float

To record the completion time of the algorithm

+
+
finalPatterns: dict

Storing the complete set of patterns in a dictionary variable

+
+
oFilestr

Name of the output file to store complete set of periodic-frequent patterns

+
+
memoryUSSfloat

To store the total amount of USS memory consumed by the program

+
+
memoryRSSfloat

To store the total amount of RSS memory consumed by the program

+
+
+
+
Methods:
+
+
startMine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of periodic-frequent patterns will be loaded in to an output file

+
+
getPatternsAsDataFrame()

Complete set of periodic-frequent patterns will be loaded in to data frame

+
+
getMemoryUSS()

Total amount of USS memory consumed by the program will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the program will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the program will be retrieved from this function

+
+
+
+
+
+
+abstract getMemoryRSS()[source]
+

Total amount of RSS memory consumed by the program will be retrieved from this function

+
+ +
+
+abstract getMemoryUSS()[source]
+

Total amount of USS memory consumed by the program will be retrieved from this function

+
+ +
+
+abstract getPatterns()[source]
+

Complete set of periodic-frequent patterns generated will be retrieved from this function

+
+ +
+
+abstract getPatternsAsDataFrame()[source]
+

Complete set of periodic-frequent patterns will be loaded in to data frame from this function

+
+ +
+
+abstract getRuntime()[source]
+

Total amount of runtime taken by the program will be retrieved from this function

+
+ +
+
+abstract printResults()[source]
+

To print all the results of execution

+
+ +
+
+abstract save(oFile)[source]
+

Complete set of periodic-frequent patterns will be saved in to an output file from this function

+
+
Parameters:
+

oFile (file) – Name of the output file

+
+
+
+ +
+
+abstract startMine()[source]
+

Code for the mining process will start from this function

+
+ +
+ +
+
+

PAMI.partialPeriodicPattern.topk.k3PMiner module

+
+
+class PAMI.partialPeriodicPattern.topk.k3PMiner.k3PMiner(iFile, k, period, sep='\t')[source]
+

Bases: partialPeriodicPatterns

+
+
Description:
+

k3PMiner is an algorithm to discover top-k partial periodic patterns in a temporal database.

+
+
Reference:
+

Palla Likhitha, Rage Uday Kiran, Discovering Top-K Partial Periodic Patterns in Big Temporal Databases. https://dl.acm.org/doi/10.1007/978-3-031-39847-6_28

+
+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine the complete set of top-k partial periodic patterns

  • +
  • oFile – str : +Name of the output file to store the complete set of top-k partial periodic patterns

  • +
  • k – int : +User specified count of top partial periodic patterns

  • +
  • period – str: +Minimum partial periodic…

  • +
  • sep – str : +This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator.

  • +
+
+
Attributes:
+
+
iFilestr

Input file name or path of the input file

+
+
k: int

User specified count of top partial periodic patterns

+
+
sepstr

This variable is used to distinguish items from one another in a transaction. The default separator is tab space ('\t'). +However, the users can override the default separator.

+
+
oFilestr

Name of the output file or the path of the output file

+
+
startTime:float

To record the start time of the mining process

+
+
endTime:float

To record the completion time of the mining process

+
+
finalPatterns: dict

Storing the complete set of patterns in a dictionary variable

+
+
memoryUSSfloat

To store the total amount of USS memory consumed by the program

+
+
memoryRSSfloat

To store the total amount of RSS memory consumed by the program

+
+
+
+
Methods:
+
+
startMine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of frequent patterns will be loaded in to an output file

+
+
getPatternsAsDataFrame()

Complete set of frequent patterns will be loaded in to a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
creatingItemSets()

Scans the dataset or dataframes and stores in list format

+
+
frequentOneItem()

Generates one frequent patterns

+
+
eclatGeneration(candidateList)

It will generate the combinations of frequent items

+
+
generateFrequentPatterns(tidList)

It will generate the combinations of frequent items from a list of items

+
+
+
+
+
+

Executing the code on terminal:

+
+
Format:
+
+python3 k3PMiner.py <iFile> <oFile> <k> <period>
+
+Examples:
+
+python3 k3PMiner.py sampleDB.txt patterns.txt 10 3
+
+
+
+
+
+

Sample run of importing the code:

+

import PAMI.partialPeriodicPattern.topk.k3PMiner as alg

obj = alg.k3PMiner(iFile, k, period)

obj.startMine()

partialPeriodicPatterns = obj.getPatterns()

print("Total number of top partial periodic Patterns:", len(partialPeriodicPatterns))

obj.save(oFile)

Df = obj.getPatternsAsDataFrame()

memUSS = obj.getMemoryUSS()

print("Total Memory in USS:", memUSS)

memRSS = obj.getMemoryRSS()

print("Total Memory in RSS", memRSS)

run = obj.getRuntime()

print("Total ExecutionTime in seconds:", run)

+
+
+
+
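Independently of the miner, "top-k" simply means keeping the k patterns with the largest periodic-support. A minimal sketch with illustrative values:

# illustrative dictionary: pattern -> periodic-support
patterns = {'a': 12, 'b': 9, 'a b': 7, 'c': 15}

k = 2
topK = sorted(patterns.items(), key=lambda kv: kv[1], reverse=True)[:k]
print(topK)  # [('c', 15), ('a', 12)]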

Credits:

+
+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS()[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS()[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns()[source]
+

Function to send the set of frequent patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame()[source]
+

Storing final frequent patterns in a dataframe

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime()[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine()[source]
+

Main function of the program

+
+ +
+
+printResults()[source]
+

This function is used to print the results

+
+ +
+
+save(outFile)[source]
+

Complete set of frequent patterns will be loaded in to an output file

+
+
Parameters:
+

outFile (file) – name of the output file

+
+
+
+ +
+
+startMine()[source]
+

Main function of the program

+
+ +
+
+ +
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.partialPeriodicPatternInMultipleTimeSeries.html b/sphinx/_build/html/PAMI.partialPeriodicPatternInMultipleTimeSeries.html new file mode 100644 index 000000000..d609af3e8 --- /dev/null +++ b/sphinx/_build/html/PAMI.partialPeriodicPatternInMultipleTimeSeries.html @@ -0,0 +1,413 @@ + + + + + + + PAMI.partialPeriodicPatternInMultipleTimeSeries package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.partialPeriodicPatternInMultipleTimeSeries package

+
+

Submodules

+
+
+

PAMI.partialPeriodicPatternInMultipleTimeSeries.PPGrowth module

+
+
+class PAMI.partialPeriodicPatternInMultipleTimeSeries.PPGrowth.PPGrowth(iFile, periodicSupport, period, sep='\t')[source]
+

Bases: _partialPeriodicPatterns

+
+

About this algorithm

+
+
Description:
+

PPGrowth is one of the fundamental algorithms to discover periodic-frequent patterns in a transactional database.

+
+
Reference:
+

C. Saideep, R. Uday Kiran, K. Zettsu, P. Fournier-Viger, M. Kitsuregawa and P. Krishna Reddy, +“Discovering Periodic Patterns in Irregular Time Series,” 2019 International Conference on Data Mining Workshops (ICDMW), 2019, pp. 1020-1028, doi: 10.1109/ICDMW.2019.00147.

+
+
+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine the complete set of periodic frequent patterns

  • +
  • oFile – str : +Name of the output file to store the complete set of periodic frequent patterns

  • +
  • sep – str : +This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator.

  • +

+
+
Attributes:
+
+
iFilefile

Name of the Input file or path of the input file

+
+
oFilefile

Name of the output file or path of the output file

+
+
minSup: int or float or str

The user can specify minSup either in count or proportion of database size. +If the program detects the data type of minSup is integer, then it treats minSup is expressed in count. +Otherwise, it will be treated as float. +Example: minSup=10 will be treated as integer, while minSup=10.0 will be treated as float

+
+
maxPer: int or float or str

The user can specify maxPer either in count or proportion of database size. +If the program detects the data type of maxPer is integer, then it treats maxPer is expressed in count. +Otherwise, it will be treated as float. +Example: maxPer=10 will be treated as integer, while maxPer=10.0 will be treated as float

+
+
sepstr

This variable is used to distinguish items from one another in a transaction. The default separator is tab space ('\t'). +However, the users can override the default separator.

+
+
memoryUSSfloat

To store the total amount of USS memory consumed by the program

+
+
memoryRSSfloat

To store the total amount of RSS memory consumed by the program

+
+
startTime:float

To record the start time of the mining process

+
+
endTime:float

To record the completion time of the mining process

+
+
Databaselist

To store the transactions of a database in list

+
+
mapSupportDictionary

To maintain the information of item and their frequency

+
+
lnoint

To represent the total number of transactions

+
+
treeclass

To represent the Tree class

+
+
itemSetCountint

To represent the total number of patterns

+
+
finalPatternsdict

To store the complete patterns

+
+
+
+
Methods:
+
+
startMine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of periodic-frequent patterns will be loaded in to an output file

+
+
getPatternsAsDataFrame()

Complete set of periodic-frequent patterns will be loaded in to a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
creatingItemSets(fileName)

Scans the dataset and stores in a list format

+
+
PeriodicFrequentOneItem()

Extracts the one-periodic-frequent patterns from database

+
+
updateDatabases()

Update the database by removing aperiodic items and sort the Database by item decreased support

+
+
buildTree()

After updating the Database, remaining items will be added into the tree by setting root node as null

+
+
convert()

to convert the user specified value

+
+
+
+
+
+
+

Execution methods

+

Terminal command

+
Format:
+
+(.venv) $ python3 PPGrowth.py <inputFile> <outputFile> <minSup> <maxPer>
+
+Examples:
+
+(.venv) $  python3 PPGrowth.py sampleTDB.txt patterns.txt 0.3 0.4
+
+
+
+

Sample run of importing the code:

+
+

from PAMI.partialPeriodicPatternInMultipleTimeSeries import PPGrowth as alg

obj = alg.PPGrowth(iFile, periodicSupport, period)

obj.startMine()

periodicFrequentPatterns = obj.getPatterns()

print("Total number of Periodic Frequent Patterns:", len(periodicFrequentPatterns))

obj.save(oFile)

Df = obj.getPatternsAsDataFrame()

memUSS = obj.getMemoryUSS()

print("Total Memory in USS:", memUSS)

memRSS = obj.getMemoryRSS()

print("Total Memory in RSS", memRSS)

run = obj.getRuntime()

print("Total ExecutionTime in seconds:", run)

+
+
+
+

Credits:

+
+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+
+Mine()[source]
+

Mining process will start from this function

+
+ +
+
+getMemoryRSS()[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS()[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns()[source]
+

Function to send the set of periodic-frequent patterns after completion of the mining process

+
+
Returns:
+

returning periodic-frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame()[source]
+

Storing final periodic-frequent patterns in a dataframe

+
+
Returns:
+

returning periodic-frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime()[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+printResults()[source]
+

This function is used to print the results

+
+ +
+
+save(outFile)[source]
+

Complete set of periodic-frequent patterns will be loaded in to an output file

+
+
Parameters:
+

outFile (file) – name of the output file

+
+
+
+ +
+
+startMine()[source]
+

Mining process will start from this function

+
+ +
+
+
+ +
+
+

PAMI.partialPeriodicPatternInMultipleTimeSeries.abstract module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.periodicCorrelatedPattern.basic.html b/sphinx/_build/html/PAMI.periodicCorrelatedPattern.basic.html new file mode 100644 index 000000000..46e196c87 --- /dev/null +++ b/sphinx/_build/html/PAMI.periodicCorrelatedPattern.basic.html @@ -0,0 +1,413 @@ + + + + + + + PAMI.periodicCorrelatedPattern.basic package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.periodicCorrelatedPattern.basic package

+
+

Submodules

+
+
+

PAMI.periodicCorrelatedPattern.basic.EPCPGrowth module

+
+
+class PAMI.periodicCorrelatedPattern.basic.EPCPGrowth.EPCPGrowth(iFile, minSup, minAllConf, maxPer, maxPerAllConf, sep='\t')[source]
+

Bases: _periodicCorrelatedPatterns

+
+
Description:
+

EPCPGrowth is an algorithm to discover periodic-correlated patterns in a temporal database.

+
+
Reference:
+

http://www.tkl.iis.u-tokyo.ac.jp/new/uploads/publication_file/file/897/Venkatesh2018_Chapter_DiscoveringPeriodic-Correlated.pdf

+
+
Attributes:
+
+
iFilefile

Name of the Input file or path of the input file

+
+
oFilefile

Name of the output file or path of the output file

+
+
minSupint or float or str

The user can specify minSup either in count or proportion of database size. +If the program detects the data type of minSup is integer, then it treats minSup is expressed in count. +Otherwise, it will be treated as float. +Example: minSup=10 will be treated as integer, while minSup=10.0 will be treated as float

+
+
minAllConfint or float or str

The user can specify minAllConf either in count or proportion of database size. +If the program detects the data type of minAllConf is integer, then it treats minAllConf as expressed in count. +Otherwise, it will be treated as float. +Example: minAllConf=10 will be treated as integer, while minAllConf=10.0 will be treated as float

+
+
maxPerint or float or str

The user can specify maxPer either in count or proportion of database size. +If the program detects the data type of maxPer is integer, then it treats maxPer is expressed in count. +Otherwise, it will be treated as float. +Example: maxPer=10 will be treated as integer, while maxPer=10.0 will be treated as float

+
+
maxPerAllConfint or float or str

The user can specify maxPerAllConf either in count or proportion of database size. If the program detects the data type of maxPerAllConf is integer, then it treats maxPerAllConf as expressed in count. Otherwise, it will be treated as float. Example: maxPerAllConf=10 will be treated as integer, while maxPerAllConf=10.0 will be treated as float

+
+
sepstr

This variable is used to distinguish items from one another in a transaction. The default separator is the tab space ('\t'). However, the users can override the default separator.

+
+
memoryUSSfloat

To store the total amount of USS memory consumed by the program

+
+
memoryRSSfloat

To store the total amount of RSS memory consumed by the program

+
+
startTimefloat

To record the start time of the mining process

+
+
endTimefloat

To record the completion time of the mining process

+
+
Databaselist

To store the transactions of a database in list

+
+
mapSupportDictionary

To maintain the information of item and their frequency

+
+
lnoint

To represent the total no of transaction

+
+
treeclass

To represents the Tree class

+
+
itemSetCountint

To represents the total no of patterns

+
+
finalPatternsdict

To store the complete patterns

+
+
+
+
Methods:
+
+
startMine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of periodic-frequent patterns will be loaded in to a output file

+
+
getPatternsAsDataFrame()

Complete set of periodic-frequent patterns will be loaded in to a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
creatingItemSets(fileName)

Scans the dataset and stores in a list format

+
+
PeriodicFrequentOneItem()

Extracts the one-periodic-frequent patterns from database

+
+
updateDatabases()

Update the database by removing aperiodic items and sort the Database by item decreased support

+
+
buildTree()

After updating the Database, remaining items will be added into the tree by setting root node as null

+
+
convert()

to convert the user specified value

+
+
+
+
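convert(), listed above, is where thresholds such as minSup are normalised from a count or a proportion. A minimal sketch of that idea (assuming the database size is known; the structure is illustrative, not the exact PAMI implementation):

def convert(value, dbSize):
    # an integer is taken as an absolute transaction count
    if isinstance(value, int):
        return value
    # a float is taken as a proportion of the database size
    if isinstance(value, float):
        return dbSize * value
    # a string is parsed into one of the two forms above
    if isinstance(value, str):
        return dbSize * float(value) if '.' in value else int(value)
    raise TypeError('threshold must be int, float, or str')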
+
+

Executing the code on terminal:

+
+
+
Format:
>>> python3 EPCPGrowth.py <inputFile> <outputFile> <minSup> <maxPer>
+
+
+
+
Examples:
>>> python3 EPCPGrowth.py sampleTDB.txt patterns.txt 0.3 0.4
+
+
+
+
+
+
+
+

Sample run of importing the code:

+
from PAMI.periodicCorrelatedPattern.basic import EPCPGrowth as alg
+
+obj = alg.EPCPGrowth(iFile, minSup, minAllConf, maxPer, maxPerAllConf)
+
+obj.startMine()
+
+periodicCorrelatedPatterns = obj.getPatterns()
+
+print("Total number of Periodic Frequent Patterns:", len(periodicCorrelatedPatterns))
+
+obj.save(oFile)
+
+Df = obj.getPatternsAsDataFrame()
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+

Credits:

+
+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
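USS (Unique Set Size) and RSS (Resident Set Size) are process-level memory statistics. A hedged sketch of how such figures are commonly obtained in Python with the psutil package (whether PAMI uses this exact call sequence is an assumption):

import os
import psutil

process = psutil.Process(os.getpid())
memoryUSS = process.memory_full_info().uss  # bytes unique to this process
memoryRSS = process.memory_info().rss       # resident bytes, including shared pages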
+ +
+
+getPatterns() dict[source]
+

Function to send the set of periodic-frequent patterns after completion of the mining process

+
+
Returns:
+

returning periodic-frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame() DataFrame[source]
+

Storing final periodic-frequent patterns in a dataframe

+
+
Returns:
+

returning periodic-frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
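Since the mined patterns are documented to live in a dict, a hedged sketch of how this dataframe could be assembled (assuming each value is a (support, periodicity) pair; column names are illustrative):

import pandas as pd

def patternsToDataFrame(finalPatterns):
    # one row per pattern: the itemset string, its support, and its periodicity
    rows = [[p, s, per] for p, (s, per) in finalPatterns.items()]
    return pd.DataFrame(rows, columns=['Patterns', 'Support', 'Periodicity'])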
+
+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+printResults() None[source]
+

This function is used to print the results

+
+ +
+
+save(outFile: str) None[source]
+

Complete set of periodic-frequent patterns will be loaded in to an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
+
+ +
+
+startMine() None[source]
+

Mining process will start from this function

+
+ +
+
+ +
+
+

PAMI.periodicCorrelatedPattern.basic.abstract module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.periodicCorrelatedPattern.html b/sphinx/_build/html/PAMI.periodicCorrelatedPattern.html new file mode 100644 index 000000000..14974ccf9 --- /dev/null +++ b/sphinx/_build/html/PAMI.periodicCorrelatedPattern.html @@ -0,0 +1,193 @@ + + + + + + + PAMI.periodicCorrelatedPattern package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+ + +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.periodicFrequentPattern.basic.html b/sphinx/_build/html/PAMI.periodicFrequentPattern.basic.html new file mode 100644 index 000000000..bb386909d --- /dev/null +++ b/sphinx/_build/html/PAMI.periodicFrequentPattern.basic.html @@ -0,0 +1,1485 @@ + + + + + + + PAMI.periodicFrequentPattern.basic package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.periodicFrequentPattern.basic package

+
+

Submodules

+
+
+

PAMI.periodicFrequentPattern.basic.PFECLAT module

+
+
+class PAMI.periodicFrequentPattern.basic.PFECLAT.PFECLAT(iFile, minSup, maxPer, sep='\t')[source]
+

Bases: _periodicFrequentPatterns

+
+
Description:
+

PFECLAT is the fundamental approach to mine the periodic-frequent patterns.

+
+
Reference:
+

P. Ravikumar, P. Likhitha, R. Uday Kiran, Y. Watanobe, and Koji Zettsu, “Towards efficient discovery of periodic-frequent patterns in columnar temporal databases”, 2021 IEA/AIE.

+
+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine complete set of periodic frequent pattern’s

  • +
  • oFile – str : +Name of the output file to store complete set of periodic frequent pattern’s

  • +
  • minSup – str: +Controls the minimum number of transactions in which every item must appear in a database.

  • +
  • maxPer – str: +Controls the maximum number of transactions in which any two items within a pattern can reappear.

  • +
• sep – str : This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, the users can override the default separator.

  • +
+
+
Attributes:
+
+
iFilefile

Name of the Input file or path of the input file

+
+
oFilefile

Name of the output file or path of the output file

+
+
minSupint or float or str

The user can specify minSup either in count or proportion of database size. +If the program detects the data type of minSup is integer, then it treats minSup is expressed in count. +Otherwise, it will be treated as float. +Example: minSup=10 will be treated as integer, while minSup=10.0 will be treated as float

+
+
maxPerint or float or str

The user can specify maxPer either in count or proportion of database size. +If the program detects the data type of maxPer is integer, then it treats maxPer is expressed in count. +Otherwise, it will be treated as float. +Example: maxPer=10 will be treated as integer, while maxPer=10.0 will be treated as float

+
+
sepstr

This variable is used to distinguish items from one another in a transaction. The default separator is the tab space ('\t'). However, the users can override the default separator.

+
+
memoryUSSfloat

To store the total amount of USS memory consumed by the program

+
+
memoryRSSfloat

To store the total amount of RSS memory consumed by the program

+
+
startTimefloat

To record the start time of the mining process

+
+
endTimefloat

To record the completion time of the mining process

+
+
Databaselist

To store the transactions of a database in list

+
+
mapSupportDictionary

To maintain the information of item and their frequency

+
+
lnoint

it represents the total no of transactions

+
+
treeclass

it represents the Tree class

+
+
itemSetCountint

it represents the total no of patterns

+
+
finalPatternsdict

it represents to store the patterns

+
+
tidListdict

stores the timestamps of an item

+
+
hashingdict

stores the patterns with their support to check for the closed property

+
+
+
+
Methods:
+
+
startMine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of periodic-frequent patterns will be loaded in to a output file

+
+
getPatternsAsDataFrame()

Complete set of periodic-frequent patterns will be loaded in to a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
creatingOneItemSets()

Scan the database and store the items with their timestamps which are periodic frequent

+
+
getPeriodAndSupport()

Calculates the support and period for a list of timestamps.

+
+
Generation()

Used to implement prefix class equivalence method to generate the periodic patterns recursively

+
+
+
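getPeriodAndSupport(), listed above, computes the two interestingness measures. A minimal sketch of the standard definition (assuming timeStamps is a list of integer transaction ids and lastTs is the final timestamp of the database; names are illustrative):

def getPeriodAndSupport(timeStamps, lastTs):
    timeStamps.sort()
    periodicity, previous = 0, 0
    for ts in timeStamps:
        # track the largest gap between consecutive occurrences
        periodicity = max(periodicity, ts - previous)
        previous = ts
    periodicity = max(periodicity, lastTs - previous)  # gap after the last occurrence
    return len(timeStamps), periodicity  # (support, periodicity)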
+
+
+

Methods to execute code on terminal

+
Format:
+
+(.venv) $ python3 PFECLAT.py <inputFile> <outputFile> <minSup>
+
+Example usage:
+
+(.venv) $ python3 PFECLAT.py sampleDB.txt patterns.txt 10.0
+
+
+
Note: minSup will be considered in percentage of database transactions.
+
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.periodicFrequentPattern.basic import PFECLAT as alg
+
+   obj = alg.PFECLAT("../basic/sampleTDB.txt", "2", "5")
+
+   obj.startMine()
+
+   periodicFrequentPatterns = obj.getPatterns()
+
+   print("Total number of Periodic Frequent Patterns:", len(periodicFrequentPatterns))
+
+   obj.save("patterns")
+
+   Df = obj.getPatternsAsDataFrame()
+
+   memUSS = obj.getMemoryUSS()
+
+   print("Total Memory in USS:", memUSS)
+
+   memRSS = obj.getMemoryRSS()
+
+   print("Total Memory in RSS", memRSS)
+
+   run = obj.getRuntime()
+
+   print("Total ExecutionTime in seconds:", run)
+
+
+
+
+

Credits:

+
+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+
+Mine() None[source]
+

Mining process will start from this function

+
+ +
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns() dict[source]
+

Function to send the set of periodic-frequent patterns after completion of the mining process

+
+
Returns:
+

returning periodic-frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame() DataFrame[source]
+

Storing final periodic-frequent patterns in a dataframe

+
+
Returns:
+

returning periodic-frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+printResults() None[source]
+

This function is used to print the results

+
+ +
+
+save(outFile: str) None[source]
+

Complete set of periodic-frequent patterns will be loaded in to a output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
Returns:
+

None

+
+
+
+ +
+
+startMine() None[source]
+

Mining process will start from this function

+
+ +
+
+ +
+
+

PAMI.periodicFrequentPattern.basic.PFPGrowth module

+
+
+class PAMI.periodicFrequentPattern.basic.PFPGrowth.PFPGrowth(iFile, minSup, maxPer, sep='\t')[source]
+

Bases: _periodicFrequentPatterns

+
+
Description:
+

PFPGrowth is one of the fundamental algorithms to discover periodic-frequent patterns in a transactional database.

+
+
Reference:
+

Syed Khairuzzaman Tanbeer, Chowdhury Farhan, Byeong-Soo Jeong, and Young-Koo Lee, “Discovering Periodic-Frequent +Patterns in Transactional Databases”, PAKDD 2009, https://doi.org/10.1007/978-3-642-01307-2_24

+
+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine complete set of periodic frequent pattern’s

  • +
  • oFile – str : +Name of the output file to store complete set of periodic frequent pattern’s

  • +
  • minSup – str: +Controls the minimum number of transactions in which every item must appear in a database.

  • +
  • maxPer – float: +Controls the maximum number of transactions in which any two items within a pattern can reappear.

  • +
• sep – str : This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, the users can override the default separator.

  • +
+
+
Attributes:
+
+
iFilefile

Name of the Input file or path of the input file

+
+
oFilefile

Name of the output file or path of the output file

+
+
minSupint or float or str

The user can specify minSup either in count or proportion of database size. +If the program detects the data type of minSup is integer, then it treats minSup is expressed in count. +Otherwise, it will be treated as float. +Example: minSup=10 will be treated as integer, while minSup=10.0 will be treated as float

+
+
maxPerint or float or str

The user can specify maxPer either in count or proportion of database size. +If the program detects the data type of maxPer is integer, then it treats maxPer is expressed in count. +Otherwise, it will be treated as float. +Example: maxPer=10 will be treated as integer, while maxPer=10.0 will be treated as float

+
+
sepstr

This variable is used to distinguish items from one another in a transaction. The default separator is the tab space ('\t'). However, the users can override the default separator.

+
+
memoryUSSfloat

To store the total amount of USS memory consumed by the program

+
+
memoryRSSfloat

To store the total amount of RSS memory consumed by the program

+
+
startTime:float

To record the start time of the mining process

+
+
endTime:float

To record the completion time of the mining process

+
+
Databaselist

To store the transactions of a database in list

+
+
mapSupportDictionary

To maintain the information of item and their frequency

+
+
lnoint

To represent the total no of transaction

+
+
treeclass

To represents the Tree class

+
+
itemSetCountint

To represents the total no of patterns

+
+
finalPatternsdict

To store the complete patterns

+
+
+
+
Methods:
+
+
startMine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of periodic-frequent patterns will be loaded in to a output file

+
+
getPatternsAsDataFrame()

Complete set of periodic-frequent patterns will be loaded in to a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
creatingItemSets(fileName)

Scans the dataset and stores in a list format

+
+
PeriodicFrequentOneItem()

Extracts the one-periodic-frequent patterns from database

+
+
updateDatabases()

Update the database by removing aperiodic items and sort the Database by item decreased support

+
+
buildTree()

After updating the Database, remaining items will be added into the tree by setting root node as null

+
+
convert()

to convert the user specified value

+
+
+
+
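Unlike the other algorithms on this page, no import example accompanies PFPGrowth here; a hedged sample mirroring the documented constructor signature (file names and threshold values are placeholders):

from PAMI.periodicFrequentPattern.basic import PFPGrowth as alg

obj = alg.PFPGrowth('sampleTDB.txt', minSup=5, maxPer=3)
obj.startMine()
periodicFrequentPatterns = obj.getPatterns()
print('Total number of Periodic Frequent Patterns:', len(periodicFrequentPatterns))
obj.save('patterns.txt')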
+
+

Credits:

+
+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+
+Mine() None[source]
+

Mining process will start from this function

+
+ +
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns() Dict[str, Tuple[int, int]][source]
+

Function to send the set of periodic-frequent patterns after completion of the mining process

+
+
Returns:
+

returning periodic-frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame() DataFrame[source]
+

Storing final periodic-frequent patterns in a dataframe

+
+
Returns:
+

returning periodic-frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+printResults() None[source]
+

This function is used to print the results

+
+ +
+
+save(outFile: str) None[source]
+

Complete set of periodic-frequent patterns will be loaded in to an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
Returns:
+

None

+
+
+
+ +
+
+startMine() None[source]
+

Mining process will start from this function

+
+ +
+
+ +
+
+

PAMI.periodicFrequentPattern.basic.PFPGrowthPlus module

+
+
+class PAMI.periodicFrequentPattern.basic.PFPGrowthPlus.PFPGrowthPlus(iFile, minSup, maxPer, sep='\t')[source]
+

Bases: _periodicFrequentPatterns

+
+
+
Description:
+

PFPGrowthPlus is an improved version of the fundamental PFPGrowth algorithm for discovering periodic-frequent patterns in a temporal database. It uses a greedy approach to discover patterns effectively.

+
+
Reference:
+

R. UdayKiran, MasaruKitsuregawa, and P. KrishnaReddyd, “Efficient discovery of periodic-frequent patterns in +very large databases,” Journal of Systems and Software February 2016 https://doi.org/10.1016/j.jss.2015.10.035

+
+
param iFile:
+

str : +Name of the Input file to mine complete set of periodic frequent pattern’s

+
+
param oFile:
+

str : +Name of the output file to store complete set of periodic frequent pattern’s

+
+
param minSup:
+

str: +Controls the minimum number of transactions in which every item must appear in a database.

+
+
param maxPer:
+

str: +Controls the maximum number of transactions in which any two items within a pattern can reappear.

+
+
param sep:
+

str : This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, the users can override the default separator.

+
+
Attributes:
+
+
iFilefile

Name of the Input file or path of the input file

+
+
oFilefile

Name of the output file or path of the output file

+
+
minSupint or float or str

The user can specify minSup either in count or proportion of database size. +If the program detects the data type of minSup is integer, then it treats minSup is expressed in count. +Otherwise, it will be treated as float. +Example: minSup=10 will be treated as integer, while minSup=10.0 will be treated as float

+
+
maxPerint or float or str

The user can specify maxPer either in count or proportion of database size. +If the program detects the data type of maxPer is integer, then it treats maxPer is expressed in count. +Otherwise, it will be treated as float. +Example: maxPer=10 will be treated as integer, while maxPer=10.0 will be treated as float

+
+
sepstr

This variable is used to distinguish items from one another in a transaction. The default separator is the tab space ('\t'). However, the users can override the default separator.

+
+
memoryUSSfloat

To store the total amount of USS memory consumed by the program

+
+
memoryRSSfloat

To store the total amount of RSS memory consumed by the program

+
+
startTime:float

To record the start time of the mining process

+
+
endTime:float

To record the completion time of the mining process

+
+
Databaselist

To store the transactions of a database in list

+
+
mapSupportDictionary

To maintain the information of item and their frequency

+
+
lnoint

it represents the total no of transaction

+
+
treeclass

it represents the Tree class

+
+
itemSetCountint

it represents the total no of patterns

+
+
finalPatternsdict

it represents to store the patterns

+
+
+
+
+
+
+
Methods:
+
+
startMine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of periodic-frequent patterns will be loaded in to a output file

+
+
getPatternsAsDataFrame()

Complete set of periodic-frequent patterns will be loaded in to a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
check(line)

To check the delimiter used in the user input file

+
+
creatingItemSets(fileName)

Scans the dataset or dataframes and stores in list format

+
+
PeriodicFrequentOneItem()

Extracts the one-periodic-frequent patterns from Databases

+
+
updateDatabases()

update the Databases by removing aperiodic items and sort the Database by item decreased support

+
+
buildTree()

after updating the databases, the remaining items are added into the tree by setting the root node as null

+
+
startMine()

the main method to run the program

+
+
+
+
+
+

Methods to execute code on terminal

+
Format:
+
+(.venv) $ python3 PFPGrowthPlus.py <inputFile> <outputFile> <minSup> <maxPer>
+
+Example:
+
+(.venv) $ python3 PFPGrowthPlus.py sampleTDB.txt patterns.txt 0.3 0.4
+
+
Note: minSup will be considered in percentage of database transactions.
+
+
+
+
+

Importing this algorithm into a python program

+
+
from PAMI.periodicFrequentPattern.basic import PFPGrowthPlus as alg
+
+obj = alg.PFPGrowthPlus("../basic/sampleTDB.txt", "2", "6")
+
+obj.startMine()
+
+periodicFrequentPatterns = obj.getPatterns()
+
+print("Total number of Periodic Frequent Patterns:", len(periodicFrequentPatterns))
+
+obj.save("patterns")
+
+Df = obj.getPatternsAsDataFrame()
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns() Dict[str, Tuple[int, int]][source]
+

Function to send the set of periodic-frequent patterns after completion of the mining process

+
+
Returns:
+

returning periodic-frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame() DataFrame[source]
+

Storing final periodic-frequent patterns in a dataframe

+
+
Returns:
+

returning periodic-frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+printResults() None[source]
+

This function is used to print the results

+
+ +
+
+save(outFile: str) None[source]
+

Complete set of periodic-frequent patterns will be loaded in to an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
Returns:
+

None

+
+
+
+ +
+
+startMine() None[source]
+

Main method where the patterns are mined by constructing the tree.

+
+ +
+
+ +
+
+

PAMI.periodicFrequentPattern.basic.PFPMC module

+
+
+class PAMI.periodicFrequentPattern.basic.PFPMC.PFPMC(iFile, minSup, maxPer, sep='\t')[source]
+

Bases: _periodicFrequentPatterns

+
+
Description:
+

PFPMC is the fundamental approach to mine the periodic-frequent patterns.

+
+
Reference:
+

(has to be added)

+
+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine complete set of periodic frequent pattern’s

  • +
  • oFile – str : +Name of the output file to store complete set of periodic frequent pattern’s

  • +
  • minSup – str: +Controls the minimum number of transactions in which every item must appear in a database.

  • +
  • maxPer – str: +Controls the maximum number of transactions in which any two items within a pattern can reappear.

  • +
• sep – str : This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, the users can override the default separator.

  • +
+
+
Attributes:
+
+
iFilefile

Name of the Input file or path of the input file

+
+
oFilefile

Name of the output file or path of the output file

+
+
minSupint or float or str

The user can specify minSup either in count or proportion of database size. +If the program detects the data type of minSup is integer, then it treats minSup is expressed in count. +Otherwise, it will be treated as float. +Example: minSup=10 will be treated as integer, while minSup=10.0 will be treated as float

+
+
maxPerint or float or str

The user can specify maxPer either in count or proportion of database size. +If the program detects the data type of maxPer is integer, then it treats maxPer is expressed in count. +Otherwise, it will be treated as float. +Example: maxPer=10 will be treated as integer, while maxPer=10.0 will be treated as float

+
+
sepstr

This variable is used to distinguish items from one another in a transaction. The default separator is the tab space ('\t'). However, the users can override the default separator.

+
+
memoryUSSfloat

To store the total amount of USS memory consumed by the program

+
+
memoryRSSfloat

To store the total amount of RSS memory consumed by the program

+
+
startTime:float

To record the start time of the mining process

+
+
endTime:float

To record the completion time of the mining process

+
+
Databaselist

To store the transactions of a database in list

+
+
mapSupportDictionary

To maintain the information of item and their frequency

+
+
lnoint

it represents the total no of transactions

+
+
treeclass

it represents the Tree class

+
+
itemSetCountint

it represents the total no of patterns

+
+
finalPatternsdict

it represents to store the patterns

+
+
tidListdict

stores the timestamps of an item

+
+
hashingdict

stores the patterns with their support to check for the closed property

+
+
+
+
Methods:
+
+
startMine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of periodic-frequent patterns will be loaded in to an output file

+
+
getPatternsAsDataFrame()

Complete set of periodic-frequent patterns will be loaded in to a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
creatingOneItemSets()

Scan the database and store the items with their timestamps which are periodic frequent

+
+
getPeriodAndSupport()

Calculates the support and period for a list of timestamps.

+
+
Generation()

Used to implement prefix class equivalence method to generate the periodic patterns recursively

+
+
+
+
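Generation(), listed above, applies the prefix-equivalence-class idea: candidates sharing a prefix are extended by intersecting their timestamp sets. A hedged sketch of that recursion (the check callback stands in for the documented minSup and maxPer tests; all names are illustrative):

def generation(prefix, prefixTids, candidates, tidList, check):
    for i, item in enumerate(candidates):
        newTids = prefixTids & tidList[item]  # timestamps shared by prefix and item
        if check(newTids):                    # survives the minSup and maxPer tests
            pattern = prefix + [item]
            yield pattern, newTids
            # recurse only over later candidates: the prefix equivalence class
            yield from generation(pattern, newTids, candidates[i + 1:], tidList, check)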
+
+

Methods to execute code on terminal

+
Format:
+
+(.venv) $ python3 PFPMC.py <inputFile> <outputFile> <minSup> <maxPer>
+
+Example usage:
+
+(.venv) $ python3 PFPMC.py sampleDB.txt patterns.txt 10.0 4.0
+
+
Note: minSup and maxPer will be considered in percentage of database transactions.
+
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.periodicFrequentPattern.basic import PFPMC as alg
+
+obj = alg.PFPMC("../basic/sampleTDB.txt", "2", "5")
+
+obj.startMine()
+
+periodicFrequentPatterns = obj.getPatterns()
+
+print("Total number of Periodic Frequent Patterns:", len(periodicFrequentPatterns))
+
+obj.save("patterns")
+
+Df = obj.getPatternsAsDataFrame()
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+

Credits:

+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns() dict[source]
+

Function to send the set of periodic-frequent patterns after completion of the mining process

+
+
Returns:
+

returning periodic-frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame() DataFrame[source]
+

Storing final periodic-frequent patterns in a dataframe

+
+
Returns:
+

returning periodic-frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+printResults() None[source]
+

This function is used to print the results

+
+ +
+
+save(outFile: str) None[source]
+

Complete set of periodic-frequent patterns will be loaded in to an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
Returns:
+

None

+
+
+
+ +
+
+startMine() None[source]
+

Mining process will start from this function

+
+ +
+
+ +
+
+

PAMI.periodicFrequentPattern.basic.PSGrowth module

+
+
+class PAMI.periodicFrequentPattern.basic.PSGrowth.Node(item, children)[source]
+

Bases: object

+

A class used to represent the node of frequentPatternTree

+
+
Attributes:
+
+
itemint

storing item of a node

+
+
timeStampslist

To maintain the timeStamps of Database at the end of the branch

+
+
parentnode

To maintain the parent of every node

+
+
childrenlist

To maintain the children of node

+
+
+
+
Methods:
+
+
addChild(itemName)

storing the children to their respective parent nodes

+
+
+
+
+
+
+addChild(node) None[source]
+

Appends the children node details to a parent node

+
+
Parameters:
+

node – children node

+
+
Returns:
+

appending children node to parent node

+
+
+
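A hedged sketch of the node structure documented above (attribute names follow the listed ones; the constructor shape is an assumption):

class Node:
    def __init__(self, item, children):
        self.item = item          # item stored at this node
        self.children = children  # dict mapping a child's item to its Node
        self.parent = None        # filled in when attached to a parent
        self.timeStamps = []      # timestamps kept at the end of a branch

    def addChild(self, node):
        # register the child under its item and remember its parent
        self.children[node.item] = node
        node.parent = self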
+ +
+ +
+
+class PAMI.periodicFrequentPattern.basic.PSGrowth.PSGrowth(iFile, minSup, maxPer, sep='\t')[source]
+

Bases: _periodicFrequentPatterns

+
+
Description:
+

PS-Growth is one of the fundamental algorithms to discover periodic-frequent patterns in a temporal database.

+
+
+
+
Reference:

A. Anirudh, R. U. Kiran, P. K. Reddy and M. Kitsuregawa, “Memory efficient mining of periodic-frequent patterns in transactional databases,” 2016 IEEE Symposium Series on Computational Intelligence (SSCI), 2016, pp. 1-8, https://doi.org/10.1109/SSCI.2016.7849926

+
+
+
+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine complete set of periodic frequent pattern’s

  • +
  • oFile – str : +Name of the output file to store complete set of periodic frequent pattern’s

  • +
  • minSup – str: +Controls the minimum number of transactions in which every item must appear in a database.

  • +
  • maxPer – str: +Controls the maximum number of transactions in which any two items within a pattern can reappear.

  • +
• sep – str : This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, the users can override the default separator.

  • +
+
+
Attributes:
+
+
iFilefile

Name of the Input file or path of the input file

+
+
oFilefile

Name of the output file or path of the output file

+
+
minSup: int or float or str

The user can specify minSup either in count or proportion of database size. +If the program detects the data type of minSup is integer, then it treats minSup is expressed in count. +Otherwise, it will be treated as float. +Example: minSup=10 will be treated as integer, while minSup=10.0 will be treated as float

+
+
maxPer: int or float or str

The user can specify maxPer either in count or proportion of database size. +If the program detects the data type of maxPer is integer, then it treats maxPer is expressed in count. +Otherwise, it will be treated as float. +Example: maxPer=10 will be treated as integer, while maxPer=10.0 will be treated as float

+
+
sepstr

This variable is used to distinguish items from one another in a transaction. The default separator is the tab space ('\t'). However, the users can override the default separator.

+
+
memoryUSSfloat

To store the total amount of USS memory consumed by the program

+
+
memoryRSSfloat

To store the total amount of RSS memory consumed by the program

+
+
startTime:float

To record the start time of the mining process

+
+
endTime:float

To record the completion time of the mining process

+
+
Databaselist

To store the transactions of a database in list

+
+
mapSupportDictionary

To maintain the information of item and their frequency

+
+
lnoint

it represents the total no of transaction

+
+
treeclass

it represents the Tree class

+
+
itemSetCountint

it represents the total no of patterns

+
+
finalPatternsdict

it represents to store the patterns

+
+
+
+
Methods:
+
+
startMine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of periodic-frequent patterns will be loaded in to an output file

+
+
getConditionalPatternsInDataFrame()

Complete set of periodic-frequent patterns will be loaded in to a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
OneLengthItems()

Scans the dataset or dataframes and stores in list format

+
+
buildTree()

after updating the databases, the remaining items are added into the tree by setting the root node as null

+
+
+
+
+
+

Methods to execute code on terminal

+
+
Format:
+
+(.venv) $ python3 PSGrowth.py <inputFile> <outputFile> <minSup> <maxPer>
+
+Example:
+
+(.venv) $ python3 PSGrowth.py sampleTDB.txt patterns.txt 0.3 0.4
+
+
+
Note: minSup will be considered in percentage of database transactions.
+
+
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.periodicFrequentPattern.basic import PSGrowth as alg
+
+obj = alg.PSGrowth("../basic/sampleTDB.txt", "2", "6")
+
+obj.startMine()
+
+periodicFrequentPatterns = obj.getPatterns()
+
+print("Total number of  Patterns:", len(periodicFrequentPatterns))
+
+obj.save("patterns")
+
+Df = obj.getPatternsAsDataFrame()
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+

Credits:

+
+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+
+Mine() None[source]
+

Mining process will start from this function

+
+ +
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns() dict[source]
+

Function to send the set of periodic-frequent patterns after completion of the mining process

+
+
Returns:
+

returning periodic-frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame() DataFrame[source]
+

Storing final periodic-frequent patterns in a dataframe

+
+
Returns:
+

returning periodic-frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+printResults() None[source]
+

This function is used to print the results

+
+ +
+
+save(outFile: str) None[source]
+

Complete set of periodic-frequent patterns will be loaded in to an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
Returns:
+

None

+
+
+
+ +
+
+startMine() None[source]
+

Mining process will start from this function

+
+ +
+
+ +
+
+PAMI.periodicFrequentPattern.basic.PSGrowth.conditionalTransactions(patterns, timestamp) Tuple[List[List[int]], List[List[_Interval]], Dict[int, Tuple[int, int]]][source]
+

To sort and update the conditional transactions by removing the items which fails frequency +and periodicity conditions

+
+
Parameters:
+
    +
  • patterns – conditional patterns of a node

  • +
  • timestamp – timeStamps of a conditional pattern

  • +
+
+
Returns:
+

conditional transactions with their respective timeStamps

+
+
+
+ +
+
+PAMI.periodicFrequentPattern.basic.PSGrowth.getPeriodAndSupport(timeStamps) List[int][source]
+

Calculates the period and support of list of timeStamps

+
+
Parameters:
+

timeStamps – timeStamps of a pattern or item

+
+
Returns:
+

support and periodicity

+
+
+
+ +
+
+

PAMI.periodicFrequentPattern.basic.abstract module

+
+
+

PAMI.periodicFrequentPattern.basic.parallelPFPGrowth module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.periodicFrequentPattern.closed.html b/sphinx/_build/html/PAMI.periodicFrequentPattern.closed.html new file mode 100644 index 000000000..d759e030f --- /dev/null +++ b/sphinx/_build/html/PAMI.periodicFrequentPattern.closed.html @@ -0,0 +1,410 @@ + + + + + + + PAMI.periodicFrequentPattern.closed package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.periodicFrequentPattern.closed package

+
+

Submodules

+
+
+

PAMI.periodicFrequentPattern.closed.CPFPMiner module

+
+
+class PAMI.periodicFrequentPattern.closed.CPFPMiner.CPFPMiner(iFile, minSup, maxPer, sep='\t')[source]
+

Bases: _periodicFrequentPatterns

+
+

About this algorithm

+
+
Description:
+

CPFPMiner algorithm is used to discover the closed periodic frequent patterns in temporal databases. +It uses depth-first search.

+
+
Reference:
+

P. Likhitha et al., “Discovering Closed Periodic-Frequent Patterns in Very Large Temporal Databases” +2020 IEEE International Conference on Big Data (Big Data), 2020, https://ieeexplore.ieee.org/document/9378215

+
+
param iFile:
+

str : +Name of the Input file to mine complete set of periodic frequent pattern’s

+
+
param oFile:
+

str : +Name of the output file to store complete set of periodic frequent pattern’s

+
+
param minSup:
+

float: +Controls the minimum number of transactions in which every item must appear in a database.

+
+
param maxPer:
+

float: +Controls the maximum number of transactions in which any two items within a pattern can reappear.

+
+
param sep:
+

str : This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, the users can override the default separator.

+
+
Attributes:
+
+
iFilestr

Input file name or path of the input file

+
+
oFilestr

Name of the output file or path of the input file

+
+
minSup: int or float or str

The user can specify minSup either in count or proportion of database size. +If the program detects the data type of minSup is integer, then it treats minSup is expressed in count. +Otherwise, it will be treated as float. +Example: minSup=10 will be treated as integer, while minSup=10.0 will be treated as float

+
+
maxPer: int or float or str

The user can specify maxPer either in count or proportion of database size. +If the program detects the data type of maxPer is integer, then it treats maxPer is expressed in count. +Otherwise, it will be treated as float. +Example: maxPer=10 will be treated as integer, while maxPer=10.0 will be treated as float

+
+
sepstr

This variable is used to distinguish items from one another in a transaction. The default separator is the tab space ('\t'). However, the users can override the default separator.

+
+
startTime:float

To record the start time of the mining process

+
+
endTime:float

To record the completion time of the mining process

+
+
finalPatterns: dict

Storing the complete set of patterns in a dictionary variable

+
+
memoryUSSfloat

To store the total amount of USS memory consumed by the program

+
+
memoryRSSfloat

To store the total amount of RSS memory consumed by the program

+
+
+
+
Methods:
+
+
startMine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of frequent patterns will be loaded in to an output file

+
+
getPatternsAsDataFrame()

Complete set of frequent patterns will be loaded in to a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
+
+
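Closedness is what separates CPFPMiner's output from ordinary periodic-frequent patterns: a pattern is closed when no proper superset has the same support. A hedged sketch of such a test, grouping candidates by support since only equal-support supersets matter (names are illustrative):

def isClosed(pattern, support, patternsBySupport):
    # only a superset with identical support can disqualify a pattern
    for other in patternsBySupport.get(support, []):
        if set(pattern) < set(other):
            return False
    return True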
+
+
+

Execution methods

+

Terminal command

+
  Format:
+
+  (.venv) $ python3 CPFPMiner.py <inputFile> <outputFile> <minSup> <maxPer>
+
+  Example:
+
+  (.venv) $ python3 CPFPMiner.py sampleTDB.txt patterns.txt 0.3 0.4
+
Note: minSup will be considered in percentage of database transactions.
+
+
+

Calling from a python program

+
from PAMI.periodicFrequentPattern.closed import CPFPMiner as alg
+
+obj = alg.CPFPMiner("../basic/sampleTDB.txt", "2", "6")
+
+obj.startMine()
+
+periodicFrequentPatterns = obj.getPatterns()
+
+print("Total number of Frequent Patterns:", len(periodicFrequentPatterns))
+
+obj.save("patterns")
+
+Df = obj.getPatternsAsDataFrame()
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+

Credits:

+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+Mine()[source]
+

Mining process will start from here

+
+ +
+
+getMemoryRSS()[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS()[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns()[source]
+

Function to send the set of frequent patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame()[source]
+

Storing final frequent patterns in a dataframe

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime()[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+printResults()[source]
+

This function is used to print the results

+
+ +
+
+save(outFile)[source]
+

Complete set of frequent patterns will be loaded in to an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
+
+ +
+
+startMine()[source]
+

Mining process will start from here

+
+ +
+
+
+ +
+
+

PAMI.periodicFrequentPattern.closed.abstract module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.periodicFrequentPattern.cuda.html b/sphinx/_build/html/PAMI.periodicFrequentPattern.cuda.html new file mode 100644 index 000000000..5c3dc5796 --- /dev/null +++ b/sphinx/_build/html/PAMI.periodicFrequentPattern.cuda.html @@ -0,0 +1,179 @@ + + + + + + + PAMI.periodicFrequentPattern.cuda package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.periodicFrequentPattern.cuda package

+
+

Submodules

+
+
+

PAMI.periodicFrequentPattern.cuda.abstract module

+
+
+

PAMI.periodicFrequentPattern.cuda.cuGPFMiner module

+
+
+

PAMI.periodicFrequentPattern.cuda.gPFMinerBit module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.periodicFrequentPattern.html b/sphinx/_build/html/PAMI.periodicFrequentPattern.html new file mode 100644 index 000000000..b4451367a --- /dev/null +++ b/sphinx/_build/html/PAMI.periodicFrequentPattern.html @@ -0,0 +1,337 @@ + + + + + + + PAMI.periodicFrequentPattern package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.periodicFrequentPattern package

+
+

Subpackages

+
+ +
+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.periodicFrequentPattern.maximal.html b/sphinx/_build/html/PAMI.periodicFrequentPattern.maximal.html new file mode 100644 index 000000000..ac4f3ca77 --- /dev/null +++ b/sphinx/_build/html/PAMI.periodicFrequentPattern.maximal.html @@ -0,0 +1,431 @@ + + + + + + + PAMI.periodicFrequentPattern.maximal package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.periodicFrequentPattern.maximal package

+
+

Submodules

+
+
+

PAMI.periodicFrequentPattern.maximal.MaxPFGrowth module

+
+
+class PAMI.periodicFrequentPattern.maximal.MaxPFGrowth.MaxPFGrowth(iFile: Any, minSup: int | float | str, maxPer: int | float | str, sep: str = '\t')[source]
+

Bases: _periodicFrequentPatterns

+
+
Description:
+

MaxPF-Growth is one of the fundamental algorithms to discover maximal periodic-frequent patterns in a temporal database.

+
+
Reference:
+

R. Uday Kiran, Yutaka Watanobe, Bhaskar Chaudhury, Koji Zettsu, Masashi Toyoda, Masaru Kitsuregawa, +“Discovering Maximal Periodic-Frequent Patterns in Very Large Temporal Databases”, +IEEE 2020, https://ieeexplore.ieee.org/document/9260063

+
+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine complete set of periodic frequent pattern’s

  • +
  • oFile – str : +Name of the output file to store complete set of periodic frequent pattern’s

  • +
  • minSup – str: +Controls the minimum number of transactions in which every item must appear in a database.

  • +
  • maxPer – float: +Controls the maximum number of transactions in which any two items within a pattern can reappear.

  • +
• sep – str : This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, the users can override the default separator.

  • +
+
+
Attributes:
+
+
iFilefile

Name of the Input file or path of the input file

+
+
oFilefile

Name of the output file or path of the output file

+
+
minSup: int or float or str

The user can specify minSup either in count or proportion of database size. +If the program detects the data type of minSup is integer, then it treats minSup is expressed in count. +Otherwise, it will be treated as float. +Example: minSup=10 will be treated as integer, while minSup=10.0 will be treated as float

+
+
maxPer: int or float or str

The user can specify maxPer either in count or proportion of database size. +If the program detects the data type of maxPer is integer, then it treats maxPer is expressed in count. +Otherwise, it will be treated as float. +Example: maxPer=10 will be treated as integer, while maxPer=10.0 will be treated as float

+
+
sepstr

This variable is used to distinguish items from one another in a transaction. The default separator is the tab space ('\t'). However, the users can override the default separator.

+
+
memoryUSSfloat

To store the total amount of USS memory consumed by the program

+
+
memoryRSSfloat

To store the total amount of RSS memory consumed by the program

+
+
startTime:float

To record the start time of the mining process

+
+
endTime:float

To record the completion time of the mining process

+
+
Databaselist

To store the transactions of a database in list

+
+
mapSupportDictionary

To maintain the information of item and their frequency

+
+
lnoint

it represents the total no of transaction

+
+
treeclass

it represents the Tree class

+
+
itemSetCountint

it represents the total no of patterns

+
+
finalPatternsdict

it represents to store the patterns

+
+
+
+
Methods:
+
+
startMine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of periodic-frequent patterns will be loaded in to a output file

+
+
getPatternsAsDataFrame()

Complete set of periodic-frequent patterns will be loaded in to a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
creatingItemSets(fileName)

Scans the dataset or dataframes and stores in list format

+
+
PeriodicFrequentOneItem()

Extracts the one-periodic-frequent patterns from Databases

+
+
updateDatabases()

update the Databases by removing aperiodic items and sort the Database by item decreased support

+
+
buildTree()

after updating the databases, the remaining items are added into the tree by setting the root node as null

+
+
startMine()

the main method to run the program

+
+
+
+
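Maximality is the distinguishing constraint here: a periodic-frequent pattern is maximal when none of its proper supersets is itself periodic-frequent. A hedged sketch of the containment test this implies (names are illustrative):

def isMaximal(candidate, maximalPatterns):
    # reject a candidate already covered by a mined maximal pattern
    return not any(set(candidate) <= set(m) for m in maximalPatterns)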
+
+

Executing the code on terminal:

+
Format:
+
(.venv) $ python3 MaxPFGrowth.py <inputFile> <outputFile> <minSup> <maxPer>

Example usage:

(.venv) $ python3 MaxPFGrowth.py sampleTDB.txt patterns.txt 0.3 0.4
+
+
Note: minSup will be considered in percentage of database transactions.
+
+
+
+
+

Sample run of the imported code:

+
from PAMI.periodicFrequentPattern.maximal import MaxPFGrowth as alg
+
+obj = alg.MaxPFGrowth("../basic/sampleTDB.txt", "2", "6")
+
+obj.startMine()
+
+Patterns = obj.getPatterns()
+
+print("Total number of Frequent Patterns:", len(Patterns))
+
+obj.save("patterns")
+
+Df = obj.getPatternsAsDataFrame()
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+

Credits:

+
+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+
+Mine() None[source]
+

Mining process will start from this function

+
+ +
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns() Dict[str, Tuple[int, int]][source]
+

Function to send the set of periodic-frequent patterns after completion of the mining process

+
+
Returns:
+

returning periodic-frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame() DataFrame[source]
+

Storing final periodic-frequent patterns in a dataframe

+
+
Returns:
+

returning periodic-frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+printResults() None[source]
+

To print the results of the execution.

+
+ +
+
+save(outFile: str) None[source]
+

Complete set of periodic-frequent patterns will be loaded in to a output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
Returns:
+

None

+
+
+
+ +
+
+startMine() None[source]
+

Mining process will start from this function

+
+ +
+
+ +
+
+

PAMI.periodicFrequentPattern.maximal.abstract module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.periodicFrequentPattern.pyspark.html b/sphinx/_build/html/PAMI.periodicFrequentPattern.pyspark.html new file mode 100644 index 000000000..c8cf11e5c --- /dev/null +++ b/sphinx/_build/html/PAMI.periodicFrequentPattern.pyspark.html @@ -0,0 +1,176 @@ + + + + + + + PAMI.periodicFrequentPattern.pyspark package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.periodicFrequentPattern.pyspark package

+
+

Submodules

+
+
+

PAMI.periodicFrequentPattern.pyspark.abstract module

+
+
+

PAMI.periodicFrequentPattern.pyspark.parallelPFPGrowth module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.periodicFrequentPattern.topk.TopkPFP.html b/sphinx/_build/html/PAMI.periodicFrequentPattern.topk.TopkPFP.html new file mode 100644 index 000000000..22d606b17 --- /dev/null +++ b/sphinx/_build/html/PAMI.periodicFrequentPattern.topk.TopkPFP.html @@ -0,0 +1,402 @@ + + + + + + + PAMI.periodicFrequentPattern.topk.TopkPFP package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.periodicFrequentPattern.topk.TopkPFP package

+
+

Submodules

+
+
+

PAMI.periodicFrequentPattern.topk.TopkPFP.TopkPFP module

+
+
+class PAMI.periodicFrequentPattern.topk.TopkPFP.TopkPFP.TopkPFPGrowth(iFile, minSup, maxPer, sep='\t')[source]
+

Bases: _periodicFrequentPatterns

+
+
+
Description:
+

Top-K is an algorithm to discover the top-k periodic-frequent patterns in a temporal database.

+
+
Reference:
+

Komate Amphawan, Philippe Lenca, Athasit Surarerks: “Mining Top-K Periodic-Frequent Pattern from Transactional Databases without Support Threshold” +International Conference on Advances in Information Technology: https://link.springer.com/chapter/10.1007/978-3-642-10392-6_3

+
+
param iFile:
+

str : Name of the Input file to mine the complete set of periodic frequent patterns

+
+
param oFile:
+

str : Name of the output file to store the complete set of periodic frequent patterns

+
+
param maxPer:
+

str: +Controls the maximum number of transactions in which any two items within a pattern can reappear.

+
+
param sep:
+

str : This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, the users can override the default separator.

+
+
Attributes:
+
+
iFilestr

Input file name or path of the input file

+
+
k: int

User-specified count of top frequent patterns

+
+
sepstr

This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, the users can override the default separator.

+
+
oFilestr

Name of the output file or the path of the output file

+
+
startTime:float

To record the start time of the mining process

+
+
endTime:float

To record the completion time of the mining process

+
+
finalPatterns: dict

Storing the complete set of patterns in a dictionary variable

+
+
memoryUSSfloat

To store the total amount of USS memory consumed by the program

+
+
memoryRSSfloat

To store the total amount of RSS memory consumed by the program

+
+
+
+
Methods:
+
+
startMine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of frequent patterns will be loaded in to a output file

+
+
getPatternsAsDataFrame()

Complete set of frequent patterns will be loaded in to a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
creatingItemSets()

Scans the dataset or dataframes and stores in list format

+
+
frequentOneItem()

Generates one frequent patterns

+
+
eclatGeneration(candidateList)

It will generate the combinations of frequent items

+
+
generateFrequentPatterns(tidList)

It will generate the combinations of frequent items from a list of items

+
+
+
+
+
+
   Format:
+
+   (.venv) $ python3 TopkPFP.py <inputFile> <outputFile> <k> <maxPer>
+
+   Examples:
+
+   (.venv) $ python3 TopkPFP.py sampleDB.txt patterns.txt 10 3
+
+
+**Sample run of the importing code:**
+---------------------------------------
+.. code-block:: python
+
+        import PAMI.periodicFrequentPattern.topk.TopkPFP.TopkPFP as alg
+
+        obj = alg.TopkPFPGrowth(iFile, k, maxPer)
+
+        obj.startMine()
+
+        periodicFrequentPatterns = obj.getPatterns()
+
+        print("Total number of Frequent Patterns:", len(periodicFrequentPatterns))
+
+        obj.save(oFile)
+
+        Df = obj.getPatternsAsDataFrame()
+
+        memUSS = obj.getMemoryUSS()
+
+        print("Total Memory in USS:", memUSS)
+
+        memRSS = obj.getMemoryRSS()
+
+        print("Total Memory in RSS", memRSS)
+
+        run = obj.getRuntime()
+
+        print("Total ExecutionTime in seconds:", run)
+
+**Credits:**
+--------------
+        The complete program was written by P.Likhitha  under the supervision of Professor Rage Uday Kiran.
+
+
+
+
+Mine()[source]
+

Main function of the program

+
+ +
+
+getMemoryRSS()[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS()[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns()[source]
+

Function to send the set of frequent patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame()[source]
+

Storing final frequent patterns in a dataframe

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime()[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+printResults()[source]
+

To print the results of the execution.

+
+ +
+
+save(outFile)[source]
+

Complete set of frequent patterns will be loaded into an output file

+
+
Parameters:
+

outFile (file) – name of the output file

+
+
+
+ +
+
+startMine()[source]
+

Main function of the program

+
+ +
+ +
+
+

PAMI.periodicFrequentPattern.topk.TopkPFP.abstract module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.periodicFrequentPattern.topk.html b/sphinx/_build/html/PAMI.periodicFrequentPattern.topk.html
new file mode 100644
index 000000000..f14ebad2b
--- /dev/null
+++ b/sphinx/_build/html/PAMI.periodicFrequentPattern.topk.html
@@ -0,0 +1,217 @@
[HTML page header and navigation boilerplate: PAMI.periodicFrequentPattern.topk package — PAMI 2024.04.23 documentation]
+ + +
+ + +
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.periodicFrequentPattern.topk.kPFPMiner.html b/sphinx/_build/html/PAMI.periodicFrequentPattern.topk.kPFPMiner.html
new file mode 100644
index 000000000..64e2a1c62
--- /dev/null
+++ b/sphinx/_build/html/PAMI.periodicFrequentPattern.topk.kPFPMiner.html
@@ -0,0 +1,405 @@
[HTML page header and navigation boilerplate: PAMI.periodicFrequentPattern.topk.kPFPMiner package — PAMI 2024.04.23 documentation]
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.periodicFrequentPattern.topk.kPFPMiner package

+
+

Submodules

+
+
+

PAMI.periodicFrequentPattern.topk.kPFPMiner.abstract module

+
+
+

PAMI.periodicFrequentPattern.topk.kPFPMiner.kPFPMiner module

+
+
+class PAMI.periodicFrequentPattern.topk.kPFPMiner.kPFPMiner.kPFPMiner(iFile, k, sep='\t')[source]
+

Bases: _periodicFrequentPatterns

+
+
Description:
+

Top-K is an algorithm to discover the top-k periodic-frequent patterns in a temporal database.

+
+
Reference:
+
+
Likhitha, P., Ravikumar, P., Kiran, R.U., Watanobe, Y. (2022).

Discovering Top-k Periodic-Frequent Patterns in Very Large Temporal Databases. Big Data Analytics.

+
+
+

BDA 2022. Lecture Notes in Computer Science, vol 13773. Springer, Cham. https://doi.org/10.1007/978-3-031-24094-2_14

+
+
Parameters:
+
    +
  • iFile – str : Name of the Input file to mine the complete set of periodic frequent patterns

  • oFile – str : Name of the output file to store the complete set of periodic frequent patterns

  • sep – str : This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, the users can override the default separator.
+
+
Attributes:
+
+
iFilestr

Input file name or path of the input file

+
+
k: int

User-specified count of top-k periodic frequent patterns

+
+
sepstr

This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, the users can override the default separator.

+
+
oFilestr

Name of the output file or the path of the output file

+
+
startTime:float

To record the start time of the mining process

+
+
endTime:float

To record the completion time of the mining process

+
+
finalPatterns: dict

Storing the complete set of patterns in a dictionary variable

+
+
memoryUSSfloat

To store the total amount of USS memory consumed by the program

+
+
memoryRSSfloat

To store the total amount of RSS memory consumed by the program

+
+
+
+
Methods:
+
+
startMine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
savePatterns(oFile)

Complete set of frequent patterns will be loaded in to a output file

+
+
getPatternsAsDataFrame()

Complete set of frequent patterns will be loaded in to a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
creatingItemSets()

Scans the dataset or dataframes and stores in list format

+
+
frequentOneItem()

Generates one frequent patterns

+
+
eclatGeneration(candidateList)

It will generate the combinations of frequent items

+
+
generateFrequentPatterns(tidList)

It will generate the combinations of frequent items from a list of items

+
+
+
+
+
+

Executing the code on terminal:

+
Format:
+
+
+(.venv) $ python3 kPFPMiner.py <inputFile> <outputFile> <k>
+
+Examples :
+
+(.venv) $  python3 kPFPMiner.py sampleDB.txt patterns.txt 10
+
+
+
+
+

Sample run of the importing code:

+
import PAMI.periodicFrequentPattern.topk.kPFPMiner.kPFPMiner as alg
+
+obj = alg.kPFPMiner(iFile, k)
+
+obj.startMine()
+
+periodicFrequentPatterns = obj.getPatterns()
+
+print("Total number of top-k Periodic Frequent Patterns:", len(periodicFrequentPatterns))
+
+obj.save(oFile)
+
+Df = obj.getPatternsAsDataFrame()
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+

Credits:

+
+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS()[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS()[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns()[source]
+

Function to send the set of frequent patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame()[source]
+

Storing final frequent patterns in a dataframe

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getPer_Sup(tids)[source]
+
+ +
+
+getRuntime()[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+lno = 0
+
+ +
+
+printResults()[source]
+
+ +
+
+save(outFile)[source]
+

Complete set of frequent patterns will be loaded into an output file

+
+
Parameters:
+

outFile (file) – name of the output file

+
+
+
+ +
+
+startMine()[source]
+

Main function of the program

+
+ +
+
+ +
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.recurringPattern.basic.html b/sphinx/_build/html/PAMI.recurringPattern.basic.html
new file mode 100644
index 000000000..c062b850f
--- /dev/null
+++ b/sphinx/_build/html/PAMI.recurringPattern.basic.html
@@ -0,0 +1,429 @@
[HTML page header and navigation boilerplate: PAMI.recurringPattern.basic package — PAMI 2024.04.23 documentation]
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.recurringPattern.basic package

+
+

Submodules

+
+
+

PAMI.recurringPattern.basic.RPGrowth module

+
+
+class PAMI.recurringPattern.basic.RPGrowth.RPGrowth(iFile, maxPer, minPS, minRec, sep='\t')[source]
+

Bases: _recurringPatterns

+
+
Description:
+

RPGrowth is one of the fundamental algorithms to discover recurring patterns in a transactional database.

+
+
Reference:
+
    +
  1. Uday Kiran, Haichuan Shang, Masashi Toyoda and Masaru Kitsuregawa: Discovering Recurring Patterns in Time Series, https://www.tkl.iis.u-tokyo.ac.jp/new/uploads/publication_file/file/693/Paper%2023.pdf
+
+
Parameters:
+
    +
  • iFile – str : Name of the Input file to mine the complete set of recurring patterns

  • oFile – str : Name of the output file to store the complete set of recurring patterns

  • sep – str : This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, the users can override the default separator.

  • minPS – int or float : The user-specified minimum periodic-support, given either as a count or as a proportion of the database size.

  • maxPer – int or float : The user-specified maximum periodicity, given either as a count or as a proportion of the database size.

  • minRec – int : The user-specified minimum recurrence, given as a count.
+
+
Attributes:
+
+
iFilefile

Name of the Input file or path of the input file

+
+
oFilefile

Name of the output file or path of the output file

+
+
maxPerint or float or str

The user can specify maxPer either in count or proportion of database size. If the program detects that maxPer is an integer, it treats maxPer as a count; otherwise it is treated as a proportion. Example: maxPer=10 will be treated as a count, while maxPer=10.0 will be treated as a proportion.

+
+
minPSint or float or str

The user can specify minPS either in count or proportion of database size. If the program detects that minPS is an integer, it treats minPS as a count; otherwise it is treated as a proportion. Example: minPS=10 will be treated as a count, while minPS=10.0 will be treated as a proportion.

+
+
minRecint or float or str

The user has to specify minRec in count.

+
+
sepstr

This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, the users can override the default separator.

+
+
memoryUSSfloat

To store the total amount of USS memory consumed by the program

+
+
memoryRSSfloat

To store the total amount of RSS memory consumed by the program

+
+
startTimefloat

To record the start time of the mining process

+
+
endTimefloat

To record the completion time of the mining process

+
+
Databaselist

To store the transactions of a database in list

+
+
mapSupportDictionary

To maintain the information of item and their frequency

+
+
lnoint

To represent the total no of transaction

+
+
treeclass

To represents the Tree class

+
+
itemSetCountint

To represents the total no of patterns

+
+
finalPatternsdict

To store the complete patterns

+
+
+
+
Methods:
+
+
startMine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of periodic-frequent patterns will be loaded in to a output file

+
+
getPatternsAsDataFrame()

Complete set of periodic-frequent patterns will be loaded in to a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
creatingItemSets(fileName)

Scans the dataset and stores in a list format

+
+
OneItems()

Extracts the possible recurring items of size one from database

+
+
updateDatabases()

Update the database by removing non recurring items and sort the Database by item decreased support

+
+
buildTree()

After updating the Database, remaining items will be added into the tree by setting root node as null

+
+
convert()

to convert the user specified value

+
+
+
+
+
+
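
A minimal sketch of the count-versus-proportion convention documented for maxPer and minPS above; this mirrors the described behaviour rather than PAMI's exact code, and dbSize is a hypothetical stand-in for the number of transactions:

def convert(value, dbSize):
    # integers are absolute counts; floats are proportions of the database
    if isinstance(value, int):
        return value
    if isinstance(value, float):
        return value * dbSize
    s = str(value)                # strings follow the same rule
    return float(s) * dbSize if '.' in s else int(s)

print(convert(10, 500))    # 10 (a count)
print(convert(0.5, 500))   # 250.0 (half of 500 transactions)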

Methods to execute code on terminal

+
Format:
+
+(.venv) $ python3 RPGrowth.py <inputFile> <outputFile> <maxPer> <minPS> <minRec>
+
+Example usage:
+
+(.venv) $ python3 RPGrowth.py sampleTDB.txt patterns.txt 0.3 0.4 2
+
+
+        .. note:: maxPer and minPS will be considered in percentage of database transactions
+
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.recurringPattern.basic import RPGrowth as alg
+
+obj = alg.RPGrowth(iFile, maxPer, minPS, minRec)
+
+obj.startMine()
+
+periodicFrequentPatterns = obj.getPatterns()
+
+print("Total number of Periodic Frequent Patterns:", len(periodicFrequentPatterns))
+
+obj.save(oFile)
+
+Df = obj.getPatternsAsDataFrame()
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+

Credits:

+
+

The complete program was written by C. Saideep under the supervision of Professor Rage Uday Kiran.

+
+
+
+Mine()[source]
+

Mining process will start from this function

+
+ +
+
+getMemoryRSS()[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS()[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns()[source]
+

Function to send the set of periodic-frequent patterns after completion of the mining process

+
+
Returns:
+

returning periodic-frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame()[source]
+

Storing final periodic-frequent patterns in a dataframe

+
+
Returns:
+

returning periodic-frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime()[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+printResults()[source]
+

To print all the results of execution

+
+ +
+
+save(outFile)[source]
+

Complete set of periodic-frequent patterns will be loaded into an output file

+
+
Parameters:
+

outFile (file) – name of the output file.

+
+
+
+ +
+
+startMine()[source]
+

Mining process will start from this function

+
+ +
+
+ +
+
+

PAMI.recurringPattern.basic.abstract module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.recurringPattern.html b/sphinx/_build/html/PAMI.recurringPattern.html
new file mode 100644
index 000000000..a9684f3ca
--- /dev/null
+++ b/sphinx/_build/html/PAMI.recurringPattern.html
@@ -0,0 +1,194 @@
[HTML page header and navigation boilerplate: PAMI.recurringPattern package — PAMI 2024.04.23 documentation]
+ + +
+ +
+
+
+ +
+
+ + +
+
+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.relativeFrequentPattern.basic.html b/sphinx/_build/html/PAMI.relativeFrequentPattern.basic.html
new file mode 100644
index 000000000..1545cac7e
--- /dev/null
+++ b/sphinx/_build/html/PAMI.relativeFrequentPattern.basic.html
@@ -0,0 +1,428 @@
[HTML page header and navigation boilerplate: PAMI.relativeFrequentPattern.basic package — PAMI 2024.04.23 documentation]
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.relativeFrequentPattern.basic package

+
+

Submodules

+
+
+

PAMI.relativeFrequentPattern.basic.RSFPGrowth module

+
+
+class PAMI.relativeFrequentPattern.basic.RSFPGrowth.RSFPGrowth(iFile: str | DataFrame, minSup: int | float | str, minRS: float, sep: str = '\t')[source]
+

Bases: _frequentPatterns

+
+
Description:
+

Algorithm to find all items with relative support from given dataset

+
+
Reference:
+

‘Towards Efficient Discovery of Frequent Patterns with Relative Support’, R. Uday Kiran and Masaru Kitsuregawa, http://comad.in/comad2012/pdf/kiran.pdf

+
+
Parameters:
+
    +
  • iFile – str : Name of the Input file to mine the complete set of relative frequent patterns

  • oFile – str : Name of the output file to store the complete set of relative frequent patterns

  • minSup – int or float or str : Controls the minimum number of transactions in which every item must appear in the database.

  • minRS – float : Controls the minimum number of transactions in which at least one item within a pattern must appear in the database.

  • sep – str : This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, the users can override the default separator.
+
+
Attributes:
+
+
iFilefile

Name of the Input file to mine complete set of frequent patterns

+
+
oFilefile

Name of the output file to store complete set of frequent patterns

+
+
memoryUSSfloat

To store the total amount of USS memory consumed by the program

+
+
memoryRSSfloat

To store the total amount of RSS memory consumed by the program

+
+
startTime:float

To record the start time of the mining process

+
+
endTimefloat

To record the completion time of the mining process

+
+
minSupfloat

The user given minSup

+
+
minRSfloat

The user given minRS

+
+
Databaselist

To store the transactions of a database in list

+
+
mapSupportDictionary

To maintain the information of item and their frequency

+
+
lnoint

it represents the total no of transactions

+
+
treeclass

it represents the Tree class

+
+
itemSetCountint

it represents the total no of patterns

+
+
finalPatternsdict

it represents to store the patterns

+
+
itemSetBufferlist

it represents the store the items in mining

+
+
maxPatternLengthint

it represents the constraint for pattern length

+
+
+
+
Methods:
+
+
startMine()

Mining process will start from here

+
+
getFrequentPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of frequent patterns will be loaded in to a output file

+
+
getPatternsAsDataFrame()

Complete set of frequent patterns will be loaded in to a dataframe

+
+
getmemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
check(line)

To check the delimiter used in the user input file

+
+
creatingItemSets(fileName)

Scans the dataset or dataframes and stores in list format

+
+
frequentOneItem()

Extracts the one-frequent patterns from transactions

+
+
saveAllCombination(tempBuffer,s,position,prefix,prefixLength)

Forms all the combinations between prefix and tempBuffer lists with support(s)

+
+
saveItemSet(pattern,support)

Stores all the frequent patterns with their respective support

+
+
frequentPatternGrowthGenerate(frequentPatternTree,prefix,port)

Mining the frequent patterns by forming conditional frequentPatternTrees to particular prefix item. +__mapSupport represents the 1-length items with their respective support

+
+
+
+
+
+

Methods to execute code on terminal

+
Format:
+
+(.venv) $ python3 RSFPGrowth.py <inputFile> <outputFile> <minSup> <minRS>
+
+Example usage:
+
+(.venv) $ python3 RSFPGrowth.py sampleDB.txt patterns.txt 0.23 0.2
+
+
+        .. note:: minSup and minRS will be considered in percentage of database transactions
+
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.relativeFrequentPattern.basic import RSFPGrowth as alg
+
+obj = alg.RSFPGrowth(iFile, minSup, minRS)
+
+obj.startMine()
+
+frequentPatterns = obj.getPatterns()
+
+print("Total number of Frequent Patterns:", len(frequentPatterns))
+
+obj.save(oFile)
+
+Df = obj.getPatternsAsDataFrame()
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
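
For intuition only, a toy support-ratio check in the spirit of relative support; the exact measure should be taken from the cited paper, and the item supports below are hypothetical numbers:

itemSupport = {"a": 10, "b": 4}    # supports of the individual items
patternSupport = 3                 # support of the pattern {a, b}

# one common null-invariant formulation: pattern support relative to the
# largest support among the pattern's items
relativeSupport = patternSupport / max(itemSupport[i] for i in ("a", "b"))
print(relativeSupport)             # 0.3
print(relativeSupport >= 0.2)      # compared against a user-given minRS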
+

Credits:

+
+

The complete program was written by Sai Chitra.B under the supervision of Professor Rage Uday Kiran.

+
+
+
+Mine() None[source]
+

Main program to start the operation. Returns None.

+
+ +
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns() Dict[str, str][source]
+

Function to send the set of frequent patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame() DataFrame[source]
+

Storing final frequent patterns in a dataframe

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+printResults() None[source]
+

This function is used to print the results. Returns None.

+
+ +
+
+save(outFile: str) None[source]
+

Complete set of frequent patterns will be loaded into an output file

+
+
Parameters:
+

outFile (file) – name of the output file.

+
+
Returns:
+

None

+
+
+
+ +
+
+startMine() None[source]
+

Main program to start the operation. Returns None.

+
+ +
+
+ +
+
+

PAMI.relativeFrequentPattern.basic.abstract module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.relativeFrequentPattern.html b/sphinx/_build/html/PAMI.relativeFrequentPattern.html
new file mode 100644
index 000000000..d9270637e
--- /dev/null
+++ b/sphinx/_build/html/PAMI.relativeFrequentPattern.html
@@ -0,0 +1,194 @@
[HTML page header and navigation boilerplate: PAMI.relativeFrequentPattern package — PAMI 2024.04.23 documentation]
+ + +
+ +
+
+
+ +
+
+ + +
+
+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.relativeHighUtilityPattern.basic.html b/sphinx/_build/html/PAMI.relativeHighUtilityPattern.basic.html
new file mode 100644
index 000000000..154732f72
--- /dev/null
+++ b/sphinx/_build/html/PAMI.relativeHighUtilityPattern.basic.html
@@ -0,0 +1,467 @@
[HTML page header and navigation boilerplate: PAMI.relativeHighUtilityPattern.basic package — PAMI 2024.04.23 documentation]
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.relativeHighUtilityPattern.basic package

+
+

Submodules

+
+
+

PAMI.relativeHighUtilityPattern.basic.RHUIM module

+
+
+class PAMI.relativeHighUtilityPattern.basic.RHUIM.RHUIM(iFile: str, minUtil: int, minUR: float, sep: str = '\t')[source]
+

Bases: _utilityPatterns

+
+
Description:
+

RHUIM algorithm helps us to mine Relative High Utility itemSets from transactional databases.

+
+
Reference:
+

R. U. Kiran, P. Pallikila, J. M. Luna, P. Fournier-Viger, M. Toyoda and P. K. Reddy, +“Discovering Relative High Utility Itemsets in Very Large Transactional Databases Using Null-Invariant Measure,”

+
+

2021 IEEE International Conference on Big Data (Big Data), Orlando, FL, USA, 2021, pp. 252-262, +doi: 10.1109/BigData52589.2021.9672064.

+
+
+
Parameters:
+
    +
  • iFile – str : Name of the Input file to mine the complete set of Relative High Utility patterns

  • oFile – str : Name of the output file to store the complete set of Relative High Utility patterns

  • minUtil – int : The minimum utility threshold.

  • minUR – float : The minimum utility-ratio threshold.

  • sep – str : This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, the users can override the default separator.
+
+
Attributes:
+
+
iFilefile

Name of the input file to mine complete set of patterns

+
+
oFilefile

Name of the output file to store complete set of patterns

+
+
memoryRSSfloat

To store the total amount of RSS memory consumed by the program

+
+
startTimefloat

To record the start time of the mining process

+
+
endTimefloat

To record the completion time of the mining process

+
+
minUtilint

The user given minUtil value

+
+
minURfloat

The user given minUR value

+
+
relativeHighUtilityItemSetsmap

set of relative high utility itemSets

+
+
candidateCountint

Number of candidates

+
+
utilityBinArrayLUlist

A map to hold the local utility values of the items in database

+
+
utilityBinArraySUlist

A map to hold the subtree utility values of the items is database

+
+
oldNamesToNewNameslist

A map which contains old names, new names of items as key value pairs

+
+
newNamesToOldNameslist

A map which contains new names, old names of items as key value pairs

+
+
maxMemoryfloat

Maximum memory used by this program for running

+
+
patternCountint

Number of RHUI’s

+
+
itemsToKeeplist

keep only the promising items i.e items that can extend other items to form RHUIs

+
+
itemsToExplorelist

list of items that needs to be explored

+
+
+
+
Methods:
+
+
startMine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of patterns will be loaded in to a output file

+
+
getPatternsAsDataFrame()

Complete set of patterns will be loaded in to a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
backTrackingRHUIM(transactionsOfP, itemsToKeep, itemsToExplore, prefixLength)

A method to mine the RHUIs Recursively

+
+
useUtilityBinArraysToCalculateUpperBounds(transactionsPe, j, itemsToKeep)

A method to calculate the sub-tree utility and local utility of all items that can extend itemSet P and e

+
+
output(tempPosition, utility)

A method to output a relative-high-utility itemSet to file or memory depending on what the user chose

+
+
is_equal(transaction1, transaction2)

A method to Check if two transaction are identical

+
+
useUtilityBinArrayToCalculateSubtreeUtilityFirstTime(dataset)

A method to calculate the sub tree utility values for single items

+
+
sortDatabase(self, transactions)

A Method to sort transaction

+
+
sort_transaction(self, trans1, trans2)

A Method to sort transaction

+
+
useUtilityBinArrayToCalculateLocalUtilityFirstTime(self, dataset)

A method to calculate local utility values for single itemSets

+
+
+
+
+
+

Methods to execute code on terminal

+
Format:
+
+(.venv) $ python3 RHUIM.py <inputFile> <outputFile> <minUtil> <minUR> <sep>
+
+Example usage:
+
+(.venv) $ python3 RHUIM.py sampleTDB.txt output.txt 35 20
+
+
+        .. note:: minUtil is given as an absolute utility value and minUR as a utility ratio
+
+
+
+
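
As context for the input file, a sketch of the colon-separated layout commonly used for utility transactional databases (an illustrative assumption; consult the PAMI documentation for the exact format RHUIM expects):

# items : transaction utility : per-item utilities
sample = "a b c:10:3 3 4\nb d:7:5 2\n"
with open("sampleUtilityDB.txt", "w") as f:
    f.write(sample)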
+

Importing this algorithm into a python program

+
from PAMI.relativeHighUtilityPattern.basic import RHUIM as alg
+
+obj = alg.RHUIM("input.txt", 35, 20)
+
+obj.startMine()
+
+frequentPatterns = obj.getPatterns()
+
+print("Total number of Frequent Patterns:", len(frequentPatterns))
+
+obj.save(oFile)
+
+Df = obj.getPatternsAsDataFrame()
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+

Credits:

+
+

The complete program was written by Pradeep Pallikila under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns() dict[source]
+

Function to send the set of patterns after completion of the mining process

+
+
Returns:
+

returning patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame() DataFrame[source]
+

Storing final patterns in a dataframe

+
+
Returns:
+

returning patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+printResults() None[source]
+

This function is used to print the results. Returns None.

+
+ +
+
+save(outFile: str) None[source]
+

Complete set of frequent patterns will be loaded into an output file

+
+
Parameters:
+

outFile (file) – name of the output file

+
+
Returns:
+

None

+
+
+
+ +
+
+sortDatabase(transactions: list) None[source]
+

A Method to sort transaction

+
+
Attributes:
+

+
Parameters:
+

transactions (list) – transaction of items

+
+
Returns:
+

sorted transactions.

+
+
Return type:
+

Transactions or list

+
+
+
+ +
+
+sort_transaction(trans1: _Transaction, trans2: _Transaction) int[source]
+

A Method to sort transaction

+
+
Attributes:
+

+
Parameters:
+

trans1 (Transaction) – the first transaction .

+
+
+

trans2 (Transaction) – the second transaction.

Returns:

sorted transaction

Return type:

Transaction

+
+ +
+
+startMine() None[source]
+

Mining process will start from this function. Returns None.

+
+ +
+
+ +
+
+

PAMI.relativeHighUtilityPattern.basic.abstract module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.relativeHighUtilityPattern.html b/sphinx/_build/html/PAMI.relativeHighUtilityPattern.html
new file mode 100644
index 000000000..cc210c221
--- /dev/null
+++ b/sphinx/_build/html/PAMI.relativeHighUtilityPattern.html
@@ -0,0 +1,195 @@
[HTML page header and navigation boilerplate: PAMI.relativeHighUtilityPattern package — PAMI 2024.04.23 documentation]
+ + +
+ +
+
+
+ +
+
+ + +
+
+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.sequence.html b/sphinx/_build/html/PAMI.sequence.html
new file mode 100644
index 000000000..d781e9fce
--- /dev/null
+++ b/sphinx/_build/html/PAMI.sequence.html
@@ -0,0 +1,166 @@
[HTML page header and navigation boilerplate: PAMI.sequence package — PAMI 2024.04.23 documentation]
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.sequence package

+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
\ No newline at end of file
diff --git a/sphinx/_build/html/PAMI.sequentialPatternMining.basic.html b/sphinx/_build/html/PAMI.sequentialPatternMining.basic.html
new file mode 100644
index 000000000..e83900851
--- /dev/null
+++ b/sphinx/_build/html/PAMI.sequentialPatternMining.basic.html
@@ -0,0 +1,1088 @@
[HTML page header and navigation boilerplate: PAMI.sequentialPatternMining.basic package — PAMI 2024.04.23 documentation]
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.sequentialPatternMining.basic package

+
+

Submodules

+
+
+

PAMI.sequentialPatternMining.basic.SPADE module

+
+
+class PAMI.sequentialPatternMining.basic.SPADE.SPADE(iFile, minSup, sep='\t')[source]
+

Bases: _sequentialPatterns

+
+
Description:
+
    +
  • SPADE is one of the fundamental algorithms to discover sequential frequent patterns in a transactional database.

  • This program employs the SPADE property (or downward closure property) to reduce the search space effectively.

  • This algorithm employs a breadth-first search technique for patterns of length 1-2 and a depth-first search for patterns of length 3 and above to find the complete set of frequent patterns in a transactional database.
+
+
Reference:
+

Mohammed J. Zaki. 2001. SPADE: An Efficient Algorithm for Mining Frequent Sequences. Mach. Learn. 42, 1-2 (January 2001), 31-60. DOI=10.1023/A:1007652502315 http://dx.doi.org/10.1023/A:1007652502315

+
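
To illustrate the vertical idea behind SPADE (a toy sketch with hypothetical data, not PAMI's internal structures), each item can be mapped to (sequence id, event id) pairs, and a temporal join yields the occurrences of "a followed by b":

idListA = [(1, 1), (1, 3), (2, 1)]   # occurrences of item a
idListB = [(1, 2), (2, 4), (3, 1)]   # occurrences of item b

ab = sorted({(sid_b, eid_b)
             for (sid_b, eid_b) in idListB
             for (sid_a, eid_a) in idListA
             if sid_a == sid_b and eid_a < eid_b})

support = len({sid for sid, _ in ab})   # distinct sequences containing a -> b
print(ab, support)                      # [(1, 2), (2, 4)] 2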
+
Parameters:
+
    +
  • iFile – str : Name of the Input file to mine the complete set of sequential frequent patterns

  • oFile – str : Name of the output file to store the complete set of sequential frequent patterns

  • minSup – float or int or str : The minSup measure constrains the minimum number of transactions in a database where a pattern must appear. Example: minSup=10 will be treated as a count, while minSup=10.0 will be treated as a proportion of the database size.

  • sep – str : This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, the users can override the default separator.
+
+
Attributes:
+
+
iFilestr

Input file name or path of the input file

+
+
oFilestr

Name of the output file or the path of output file

+
+
minSup: float or int or str

The user can specify minSup either in count or proportion of database size. If the program detects that minSup is an integer, it treats minSup as a count; otherwise it is treated as a proportion. Example: minSup=10 will be treated as a count, while minSup=10.0 will be treated as a proportion.

+
+
sepstr

This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, the users can override the default separator.

+
+
startTime:float

To record the start time of the mining process

+
+
endTime:float

To record the completion time of the mining process

+
+
finalPatterns: dict

Storing the complete set of patterns in a dictionary variable

+
+
memoryUSSfloat

To store the total amount of USS memory consumed by the program

+
+
memoryRSSfloat

To store the total amount of RSS memory consumed by the program

+
+
Databaselist

To store the transactions of a database in list

+
+
_xLenDatabase: dict

To store the data of different sequences, separated by sequence, row number, and length.

+
+
_xLenDatabaseSamedict

To store the data of the same sequence, separated by sequence, row number, and length.

+
+
+
+
Methods:
+
+
startMine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
savePatterns(oFile)

Complete set of frequent patterns will be loaded in to an output file

+
+
getPatternsAsDataFrame()

Complete set of frequent patterns will be loaded in to a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
candidateToFrequent(candidateList)

Generates frequent patterns from the candidate patterns

+
+
frequentToCandidate(frequentList, length)

Generates candidate patterns from the frequent patterns

+
+
+
+
+
+

Methods to execute code on terminal

+
Format:
+
+(.venv) $ python3 SPADE.py <inputFile> <outputFile> <minSup>
+
+Example usage:
+
+(.venv) $ python3 SPADE.py sampleDB.txt patterns.txt 10.0
+
+
+        .. note:: minSup will be considered as a count if it is an integer, or as a proportion of database transactions if it is a float
+
+
+
+
+

Importing this algorithm into a python program

+
import PAMI.sequentialPatternMining.basic.SPADE as alg
+
+obj = alg.SPADE(iFile, minSup)
+
+obj.startMine()
+
+sequentialPatternMining = obj.getPatterns()
+
+print("Total number of Frequent Patterns:", len(frequentPatterns))
+
+obj.save(oFile)
+
+Df = obj.getPatternsAsDataFrame()
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+

Credits:

+
+

The complete program was written by Suzuki Shota under the supervision of Professor Rage Uday Kiran.

+
+
+
+Mine()[source]
+

Frequent pattern mining process will start from here

+
+ +
+
+getMemoryRSS()[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS()[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns()[source]
+

Function to send the set of frequent patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame()[source]
+

Storing final frequent patterns in a dataframe

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime()[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+make1LenDatabase()[source]
+

To make 1 length frequent patterns by breadth-first search technique and update Database to sequential database

+
+ +
+
+make2LenDatabase()[source]
+

To make 2 length frequent patterns by joining two one length patterns by breadth-first search technique and update xlen Database to sequential database

+
+ +
+
+make3LenDatabase()[source]
+

To call each 2 length patterns to make 3 length frequent patterns depth-first search technique

+
+ +
+
+makeNextRow(bs, latestWord, latestWord2)[source]
+

To make pattern row when two patterns have the latest word in different sequence

+

Parameters:

  • bs – previous pattern without the latest one

  • latestWord – latest word of one previous pattern

  • latestWord2 – latest word of the other previous pattern

+
+ +
+
+makeNextRowSame(bs, latestWord, latestWord2)[source]
+

To make pattern row when one pattern have the latestWord1 in different sequence and other(latestWord2) in same

+

Parameters:

  • bs – previous pattern without the latest one

  • latestWord – latest word of one previous pattern in the same sequence

  • latestWord2 – latest word of the other previous pattern in a different sequence

+
+ +
+
+makeNextRowSame2(bs, latestWord, latestWord2)[source]
+

To make pattern row when two patterns have the latest word in same sequence

+

Parameters:

  • bs – previous pattern without the latest one

  • latestWord – latest word of one previous pattern

  • latestWord2 – latest word of the other previous pattern

+
+ +
+
+makeNextRowSame3(bs, latestWord, latestWord2)[source]
+

To make pattern row when two patterns have the latest word in different sequence and both latest word is in same sequence

+

Parameters:

  • bs – previous pattern without the latest one

  • latestWord – latest word of one previous pattern

  • latestWord2 – latest word of the other previous pattern

+
+ +
+
+makexLenDatabase(rowLen, bs, latestWord)[source]
+

To make “rowLen” length frequent patterns from pattern which the latest word is in same seq by joining “rowLen”-1 length patterns by depth-first search technique and update xlenDatabase to sequential database

+
+
Parameters:
+

rowLen – row length of patterns.

+
+
+

Parameters:

  • bs – patterns without the latest one

  • latestWord – latest word of the patterns

+
+ +
+
+makexLenDatabaseSame(rowLen, bs, latestWord)[source]
+

To make 3 or more length frequent patterns from pattern which the latest word is in different seq by depth-first search technique and update xlenDatabase to sequential database

+
+
Parameters:
+

rowLen – row length of previous patterns.

+
+
+

Parameters:

  • bs – previous patterns without the latest one

  • latestWord – latest word of the previous patterns

+
+ +
+
+printResults()[source]
+

This function is used to print the results

+
+ +
+
+save(outFile)[source]
+

Complete set of frequent patterns will be loaded into an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
+
+ +
+
+startMine()[source]
+

Frequent pattern mining process will start from here

+
+ +
+
+ +
+
+

PAMI.sequentialPatternMining.basic.SPAM module

+
+
+class PAMI.sequentialPatternMining.basic.SPAM.SPAM(iFile, minSup, sep='\t')[source]
+

Bases: _sequentialPatterns

+
+
Description:
+

SPAM is one of the fundamental algorithms to discover sequential frequent patterns in a transactional database. This program employs the SPAM property (or downward closure property) to reduce the search space effectively. This algorithm employs a depth-first search technique (see DfsPruning below) to find the complete set of frequent patterns in a sequential database.

+
+
Reference:
+
    +
  1. J. Ayres, J. Gehrke, T. Yiu, and J. Flannick. Sequential Pattern Mining Using Bitmaps. In Proceedings of the Eighth ACM SIGKDD International Conference on Knowledge Discovery and Data Mining. Edmonton, Alberta, Canada, July 2002.
+
+
Parameters:
+
    +
  • iFile – str : Name of the Input file to mine the complete set of sequential frequent patterns

  • oFile – str : Name of the output file to store the complete set of sequential frequent patterns

  • minSup – float or int or str : The minSup measure constrains the minimum number of transactions in a database where a pattern must appear. Example: minSup=10 will be treated as a count, while minSup=10.0 will be treated as a proportion of the database size.

  • sep – str : This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, the users can override the default separator.
+
+
Attributes:
+
+
iFilestr

Input file name or path of the input file

+
+
oFilestr

Name of the output file or the path of output file

+
+
minSupfloat or int or str

The user can specify minSup either in count or proportion of database size. If the program detects that minSup is an integer, it treats minSup as a count; otherwise it is treated as a proportion. Example: minSup=10 will be treated as a count, while minSup=10.0 will be treated as a proportion.

+
+
sepstr

This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, the users can override the default separator.

+
+
startTimefloat

To record the start time of the mining process

+
+
endTimefloat

To record the completion time of the mining process

+
+
finalPatternsdict

Storing the complete set of patterns in a dictionary variable

+
+
memoryUSSfloat

To store the total amount of USS memory consumed by the program

+
+
memoryRSSfloat

To store the total amount of RSS memory consumed by the program

+
+
Databaselist

To store the sequences of a database in list

+
+
_idDatabasedict

To store the sequences of a database by bit map

+
+
_maxSeqLen:

the maximum length of subsequence in sequence.

+
+
+
+
Methods:
+
+
_creatingItemSets():

Storing the complete sequences of the database/input file in a database variable

+
+
_convert(value):

To convert the user specified minSup value

+
+
make2BitDatabase():

To make 1 length frequent patterns by breadth-first search technique and update Database to sequential database

+
+
DfsPruning(items,sStep,iStep):

The main recursive routine of SPAM. It searches the s-step and i-step items of the current pattern, finds the next patterns together with their s-step and i-step candidates, and calls itself again with them, recursing until no more items are available for exploration.

+
+
Sstep(s):

To convert a bit sequence to its s-step bit sequence: the first 1 becomes 0 and every subsequent position becomes 1 (e.g. 010101 => 001111, 00001001 => 00000111).

+
+
startMine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
savePatterns(oFile)

Complete set of frequent patterns will be loaded in to a output file

+
+
getPatternsAsDataFrame()

Complete set of frequent patterns will be loaded in to a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
candidateToFrequent(candidateList)

Generates frequent patterns from the candidate patterns

+
+
frequentToCandidate(frequentList, length)

Generates candidate patterns from the frequent patterns

+
+
+
+
+
+

Executing the code on terminal:

+
Format:
+
+(.venv) $ python3 SPAM.py <inputFile> <outputFile> <minSup> (<separator>)
+
+Example usage:
+
+(.venv) $ python3 SPAM.py sampleDB.txt patterns.txt 10.0
+
+
+        .. note:: minSup will be considered as a count if it is an integer, or as a proportion of database transactions if it is a float
+
+
+
+
+

Sample run of the importing code:

+
+

import PAMI.sequentialPatternMining.basic.SPAM as alg

obj = alg.SPAM(iFile, minSup)

obj.startMine()

sequentialPatternMining = obj.getPatterns()

print("Total number of Frequent Patterns:", len(sequentialPatternMining))

obj.save(oFile)

Df = obj.getPatternsAsDataFrame()

memUSS = obj.getMemoryUSS()

print("Total Memory in USS:", memUSS)

memRSS = obj.getMemoryRSS()

print("Total Memory in RSS", memRSS)

run = obj.getRuntime()

print("Total ExecutionTime in seconds:", run)

+
+
+
+

Credits:

+
+

The complete program was written by Shota Suzuki under the supervision of Professor Rage Uday Kiran.

+
+
+
+DfsPruning(items, sStep, iStep)[source]
+

The main recursive routine of SPAM. It searches the s-step and i-step items of the current pattern, finds the next patterns together with their s-step and i-step candidates, and calls itself again with them, recursing until no more items are available for exploration.

+
+
Attributes:
+

+
+
+
itemsstr

The patterns obtained so far

+
+
sSteplist

Items presumed to have an “sstep” relationship with “items” (an sstep item appears later, as in a-b and a-c)

+
+
iSteplist

Items presumed to have an “istep” relationship with “items” (an istep item appears at the same time, as in ab and ac)

+
+
+
+ +
+
+Sstep(s)[source]
+

To convert a bit sequence to its Sstep bit sequence: the first 1 becomes 0 and every subsequent position becomes 1 (e.g. 010101 => 001111, 00001001 => 00000111).

+
+
:param s:list

to store each bit sequence

+
+
+
+
Returns:
+

nextS:list to store the bit sequence converted by sstep

+
+
+
+ +
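
A minimal sketch of the bit transformation described above, assuming the bit sequence is a list of 0/1 integers:

def sstep(bits):
    # the first 1 becomes 0 and every later position becomes 1
    out = [0] * len(bits)
    if 1 in bits:
        for i in range(bits.index(1) + 1, len(bits)):
            out[i] = 1
    return out

assert sstep([0, 1, 0, 1, 0, 1]) == [0, 0, 1, 1, 1, 1]              # 010101 -> 001111
assert sstep([0, 0, 0, 0, 1, 0, 0, 1]) == [0, 0, 0, 0, 0, 1, 1, 1]  # 00001001 -> 00000111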
+
+countSup(n)[source]
+

count support

+
+
:param n:list

to store each bit sequence

+
+
+
+
Returns:
+

count: int support of this list

+
+
+
+ +
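
A minimal sketch of the support count described above, under the assumption that n is a collection of per-sequence bit lists and a sequence supports the pattern when any of its bits is set:

def countSup(n):
    # number of bit sequences that contain at least one set bit
    return sum(1 for bits in n if any(bits))

print(countSup([[0, 1, 0], [0, 0, 0], [1, 1, 0]]))   # 2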
+
+getMemoryRSS()[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

Returns:

returning RSS memory consumed by the mining process

Return type:

float

+
+ +
+
+getMemoryUSS()[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

Returns:

returning USS memory consumed by the mining process

Return type:

float

+
+ +
+
+getPatterns()[source]
+

Function to send the set of frequent patterns after completion of the mining process

Returns:

returning frequent patterns

Return type:

dict

+
+ +
+
+getPatternsAsDataFrame()[source]
+

Storing final frequent patterns in a dataframe

Returns:

returning frequent patterns in a dataframe

Return type:

pd.DataFrame

+
+ +
+
+getRuntime()[source]
+

Calculating the total amount of runtime taken by the mining process

Returns:

returning total amount of runtime taken by the mining process

Return type:

float

+
+ +
+
+make2BitDatabase()[source]
+

To make 1 length frequent patterns by breadth-first search technique and update Database to sequential database

+
+ +
+
+printResults()[source]
+

This function is used to print the results

+
+ +
+
+save(outFile)[source]
+

Complete set of frequent patterns will be loaded into an output file

Parameters:

outFile (file) – name of the output file

+
+ +
+
+startMine()[source]
+

Frequent pattern mining process will start from here

+
+ +
+
+ +
+
+

PAMI.sequentialPatternMining.basic.abstract module

+
+
+

PAMI.sequentialPatternMining.basic.prefixSpan module

+
+
+class PAMI.sequentialPatternMining.basic.prefixSpan.prefixSpan(iFile, minSup, sep='\t')[source]
+

Bases: _sequentialPatterns

+
+
Description:
+
    +
  • PrefixSpan is one of the fundamental algorithms to discover sequential frequent patterns in a transactional database.

  • This program employs the PrefixSpan property (or downward closure property) to reduce the search space effectively.

  • This algorithm employs a depth-first search technique to find the complete set of frequent patterns in a transactional database.
+
+
Reference:
+
    +
  1. J. Pei, J. Han, B. Mortazavi-Asl, J. Wang, H. Pinto, Q. Chen, U. Dayal, M. Hsu: Mining Sequential Patterns by Pattern-Growth: The PrefixSpan Approach. IEEE Trans. Knowl. Data Eng. 16(11): 1424-1440 (2004)
+
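
To illustrate the projection idea behind PrefixSpan (a toy sketch with hypothetical data, not PAMI's internal code), the <a>-projected database keeps, for each sequence, the suffix that follows the first occurrence of the prefix item:

sequences = [["a", "b", "c"], ["b", "a", "c"], ["b", "c"]]

def project(db, prefixItem):
    projected = []
    for seq in db:
        if prefixItem in seq:
            # keep only what follows the first occurrence of the prefix
            projected.append(seq[seq.index(prefixItem) + 1:])
    return projected

print(project(sequences, "a"))   # [['b', 'c'], ['c']]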
+
Parameters:
+
    +
  • iFile – str : Name of the Input file to mine the complete set of sequential frequent patterns

  • oFile – str : Name of the output file to store the complete set of sequential frequent patterns

  • minSup – float or int or str : The minSup measure constrains the minimum number of transactions in a database where a pattern must appear. Example: minSup=10 will be treated as a count, while minSup=10.0 will be treated as a proportion of the database size.

  • sep – str : This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, the users can override the default separator.
+
+
Attributes:
+
+
iFilestr

Input file name or path of the input file

+
+
oFilestr

Name of the output file or the path of output file

+
+
minSupfloat or int or str

The user can specify minSup either in count or proportion of database size. If the program detects that minSup is an integer, it treats minSup as a count; otherwise it is treated as a proportion. Example: minSup=10 will be treated as a count, while minSup=10.0 will be treated as a proportion.

+
+
sepstr

This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, the users can override the default separator.

+
+
startTimefloat

To record the start time of the mining process

+
+
endTimefloat

To record the completion time of the mining process

+
+
finalPatternsdict

Storing the complete set of patterns in a dictionary variable

+
+
memoryUSSfloat

To store the total amount of USS memory consumed by the program

+
+
memoryRSSfloat

To store the total amount of RSS memory consumed by the program

+
+
Databaselist

To store the transactions of a database in list

+
+
+
+
Methods:
+
+
startMine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
savePatterns(oFile)

Complete set of frequent patterns will be loaded in to a output file

+
+
getPatternsAsDataFrame()

Complete set of frequent patterns will be loaded in to a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
candidateToFrequent(candidateList)

Generates frequent patterns from the candidate patterns

+
+
frequentToCandidate(frequentList, length)

Generates candidate patterns from the frequent patterns

+
+
+
+
+
+
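The integer/float handling of minSup described in the attributes above can be sketched as follows; the helper name _convert is illustrative and the real implementation inside PAMI may differ:

def _convert(minSup, databaseSize):
    # int   -> absolute support count, used as-is
    # float -> proportion of the database size
    # str   -> parsed first, then handled by the same rule
    if isinstance(minSup, str):
        minSup = float(minSup) if '.' in minSup else int(minSup)
    if isinstance(minSup, float):
        minSup = int(minSup * databaseSize)
    return minSup

print(_convert(10, 1000))    # 10 (absolute count)
print(_convert(0.01, 1000))  # 10 (1% of 1,000 transactions)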

Methods to execute code on terminal

+
Format:
+
+(.venv) $ python3 prefixSpan.py <inputFile> <outputFile> <minSup>
+
+Example usage:
+
+(.venv) $ python3 prefixSpan.py sampleDB.txt patterns.txt 10
+
+
+        .. note:: minSup will be considered in support count or frequency
+
+
+
+
+

Importing this algorithm into a python program

+
import PAMI.sequentialPatternMining.basic.prefixSpan as alg
+
+obj = alg.prefixSpan(iFile, minSup)
+
+obj.startMine()
+
+frequentPatterns = obj.getPatterns()
+
+print("Total number of Frequent Patterns:", len(frequentPatterns))
+
+obj.save(oFile)
+
+Df = obj.getPatternsAsDataFrame()
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+

Credits:

+
+

The complete program was written by Suzuki Shota under the supervision of Professor Rage Uday Kiran.

+
+
+
+Mine()[source]
+

Frequent pattern mining process will start from here

+
+ +
+
+getMemoryRSS()[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS()[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns()[source]
+

Function to send the set of frequent patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame()[source]
+

Storing final frequent patterns in a dataframe

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime()[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getSameSeq(startrow)[source]
+
+

To get words in the latest sequence

+
+
+
Parameters:
+

startrow – the pattern obtained so far

+
+
+
+ +
+
+makeNext(sepDatabase, startrow)[source]
+

To get the next pattern by adding the head word to the next sequence of startrow

+
+
Parameters:
+
    +
  • sepDatabase – dict : +the words, and the rows in which they occur, available to extend startrow.

  • +
  • startrow – the pattern obtained so far

  • +
+
+
+
+ +
+
+makeNextSame(sepDatabase, startrow)[source]
+
+

To get the next pattern by adding the head word to the latest sequence of startrow

+
+
+
Parameters:
+
    +
  • sepDatabase – dict : +the words, and the rows in which they occur, available to extend startrow

  • +
  • startrow – the pattern obtained so far

  • +
+
+
+
+ +
+
+makeSeqDatabaseFirst(database)[source]
+

To build the list of length-1 sequence datasets, grouped by their starting word. Each line contributes at most one entry per word.

+
+
Parameters:
+

database – To store the transactions of a database in list

+
+
+
+ +
+
+makeSeqDatabaseSame(database, startrow)[source]
+

To build the sequence dataset list for sequences starting from the same word (head). Each line contributes at most one entry, and entries are separated according to whether head occurs in the latest sequence of startrow or not.

+
+
Parameters:
+
    +
  • database – To store the transactions of a database in list

  • +
  • startrow – the pattern obtained so far

  • +
+
+
+
+ +
+
+makeSupDatabase(database, head)[source]
+

To delete infrequent words, except for the words in the latest sequence

+
+
Parameters:
+

database – list : +database of lines having the same startrow and head word

+
+
+
+
head – list

words in the latest sequence

+
+
+
+
Returns:
+

changed database

+
+
+
+ +
+
+printResults()[source]
+

This function is used to print the results

+
+ +
+
+save(outFile)[source]
+

Complete set of frequent patterns will be loaded into an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
+
+ +
+
+serchSame(database, startrow, give)[source]
+

To get patterns of length 2 or more within the same sequence.

+
+
Parameters:
+
    +
  • database – list : +the transactions of the database, restricted to lines having the same startrow and head word

  • +
  • startrow – list : +the pattern obtained so far

  • +
  • give – list +the word in the latest sequence of startrow

  • +
+
+
+
+ +
+
+startMine()[source]
+

Frequent pattern mining process will start from here

+
+ +
+
+ +
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.sequentialPatternMining.closed.html b/sphinx/_build/html/PAMI.sequentialPatternMining.closed.html new file mode 100644 index 000000000..5c40427ec --- /dev/null +++ b/sphinx/_build/html/PAMI.sequentialPatternMining.closed.html @@ -0,0 +1,176 @@ + + + + + + + PAMI.sequentialPatternMining.closed package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.sequentialPatternMining.closed package

+
+

Submodules

+
+
+

PAMI.sequentialPatternMining.closed.abstract module

+
+
+

PAMI.sequentialPatternMining.closed.bide module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.sequentialPatternMining.html b/sphinx/_build/html/PAMI.sequentialPatternMining.html new file mode 100644 index 000000000..0263e46ba --- /dev/null +++ b/sphinx/_build/html/PAMI.sequentialPatternMining.html @@ -0,0 +1,250 @@ + + + + + + + PAMI.sequentialPatternMining package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.sequentialPatternMining package

+
+

Subpackages

+
+ +
+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.stablePeriodicFrequentPattern.basic.html b/sphinx/_build/html/PAMI.stablePeriodicFrequentPattern.basic.html new file mode 100644 index 000000000..b79cd2ab4 --- /dev/null +++ b/sphinx/_build/html/PAMI.stablePeriodicFrequentPattern.basic.html @@ -0,0 +1,417 @@ + + + + + + + PAMI.stablePeriodicFrequentPattern.basic package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.stablePeriodicFrequentPattern.basic package

+
+

Submodules

+
+
+

PAMI.stablePeriodicFrequentPattern.basic.SPPEclat module

+
+
+class PAMI.stablePeriodicFrequentPattern.basic.SPPEclat.SPPEclat(inputFile, minSup, maxPer, maxLa, sep='\t')[source]
+

Bases: _stablePeriodicFrequentPatterns

+
+
Description:
+

Stable periodic pattern mining aims to discover all interesting patterns in a temporal database using three constraints: minimum support, maximum period, and maximum lability. A pattern is reported when its support is no less than the user-specified minimum support and its lability is no greater than the maximum lability. (A sketch of the lability computation follows the reference below.)

+
+
Reference:
+

Fournier-Viger, P., Yang, P., Lin, J. C.-W., Kiran, U. (2019). Discovering Stable Periodic-Frequent Patterns in Transactional Data. Proc. +32nd Intern. Conf. on Industrial, Engineering and Other Applications of Applied Intelligent Systems (IEA AIE 2019), Springer LNAI, pp. 230-244

+
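Following the reference above, a pattern's lability can be computed from its list of timestamps: every period that exceeds maxPer increases the accumulated lability, and shorter periods let it recover toward zero. A minimal sketch, assuming sorted timestamps and a database ending at lastTimestamp; the exact border handling inside PAMI may differ:

def calculateLa(timestamps, maxPer, lastTimestamp):
    # Periods are the gaps between consecutive occurrences,
    # including the borders of the database.
    ts = [0] + sorted(timestamps) + [lastTimestamp]
    la, worst = 0, 0
    for prev, cur in zip(ts, ts[1:]):
        la = max(0, la + (cur - prev) - maxPer)
        worst = max(worst, la)
    return worst

print(calculateLa([2, 4, 9], maxPer=3, lastTimestamp=10))  # 2

A pattern is considered stable when this value stays at or below maxLa.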
+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine complete set of stable periodic Frequent Pattern.

  • +
  • oFile – str : +Name of the output file to store complete set of stable periodic Frequent Pattern.

  • +
  • minSup – float or int or str : +The user can specify minSup either in count or proportion of database size. +If the program detects that the data type of minSup is integer, it treats minSup as expressed in count. +Otherwise, it will be treated as a proportion (float). +Example: minSup=10 will be treated as a count, while minSup=10.0 will be treated as a proportion

  • +
  • maxPer – int or float or str : +Maximum period allowed between any two consecutive occurrences of a pattern

  • +
  • maxLa – float : +Maximum lability allowed for a pattern

  • +
  • sep – str : +This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, users can override the default separator.

  • +
+
+
Attributes:
+
+
iFile : file

Name of the Input file or path of the input file

+
+
oFile : file

Name of the output file or path of the output file

+
+
minSup : int or float or str

The user can specify minSup either in count or proportion of database size. +If the program detects that the data type of minSup is integer, it treats minSup as expressed in count. +Otherwise, it will be treated as a proportion (float). +Example: minSup=10 will be treated as a count, while minSup=10.0 will be treated as a proportion

+
+
maxPer : int or float or str

The user can specify maxPer either in count or proportion of database size. +If the program detects that the data type of maxPer is integer, it treats maxPer as expressed in count. +Otherwise, it will be treated as a proportion (float). +Example: maxPer=10 will be treated as a count, while maxPer=10.0 will be treated as a proportion

+
+
maxLa : int or float or str

The user can specify maxLa either in count or proportion of database size. +If the program detects that the data type of maxLa is integer, it treats maxLa as expressed in count. +Otherwise, it will be treated as a proportion (float). +Example: maxLa=10 will be treated as a count, while maxLa=10.0 will be treated as a proportion

+
+
sep : str

This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, users can override the default separator.

+
+
memoryUSS : float

To store the total amount of USS memory consumed by the program

+
+
memoryRSS : float

To store the total amount of RSS memory consumed by the program

+
+
startTime : float

To record the start time of the mining process

+
+
endTime : float

To record the completion time of the mining process

+
+
Database : list

To store the transactions of a database in list

+
+
mapSupport : Dictionary

To maintain the information of item and their frequency

+
+
lno : int

it represents the total no of transactions

+
+
tree : class

it represents the Tree class

+
+
itemSetCount : int

it represents the total no of patterns

+
+
finalPatterns : dict

it represents to store the patterns

+
+
tidList : dict

stores the timestamps of an item

+
+
+
+
Methods:
+
+
startMine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of periodic-frequent patterns will be loaded into an output file

+
+
getPatternsAsDataFrame()

Complete set of periodic-frequent patterns will be loaded into a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
creatingItemSets()

Scan the database and store the items with their timestamps which are periodic frequent

+
+
calculateLa()

Calculates the support and period for a list of timestamps.

+
+
Generation()

Used to implement prefix class equivalence method to generate the periodic patterns recursively

+
+
+
+
+
+

Methods to execute code on terminal

+
Format:
+
+(.venv) $ python3 SPPEclat.py <inputFile> <outputFile> <minSup> <maxPer> <maxLa>
+
+Example usage:
+
+(.venv) $ python3 SPPEclat.py sampleDB.txt patterns.txt 10.0 4.0 2.0
+
+
+        .. note:: constraints will be considered in percentage of database transactions
+
+
+
+
+

Importing this algorithm into a python program

+

from PAMI.stablePeriodicFrequentPattern.basic import SPPEclat as alg

obj = alg.SPPEclat("../basic/sampleTDB.txt", 5, 3, 3)

obj.startMine()

Patterns = obj.getPatterns()

print("Total number of Stable Periodic Frequent Patterns:", len(Patterns))

obj.save("patterns")

Df = obj.getPatternsAsDataFrame()

memUSS = obj.getMemoryUSS()

print("Total Memory in USS:", memUSS)

memRSS = obj.getMemoryRSS()

print("Total Memory in RSS", memRSS)

run = obj.getRuntime()

print("Total ExecutionTime in seconds:", run)

+
+
+
+

Credits:

+
+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS()[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function +:return: returning RSS memory consumed by the mining process +:rtype: float

+
+ +
+
+getMemoryUSS()[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function +:return: returning USS memory consumed by the mining process +:rtype: float

+
+ +
+
+getPatterns()[source]
+

Function to return the set of stable periodic-frequent patterns after completion of the mining process

+
+
Returns:
+

returning stable periodic-frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame()[source]
+

Storing final periodic-frequent patterns in a dataframe

+
+
Returns:
+

returning periodic-frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime()[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine()[source]
+

Method to start the mining of patterns

+
+ +
+
+printResults()[source]
+

This function is used to print the results

+
+ +
+
+save(outFile)[source]
+

Complete set of periodic-frequent patterns will be loaded into an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
+
+ +
+
+startMine()[source]
+

Method to start the mining of patterns

+
+ +
+
+ +
+
+

PAMI.stablePeriodicFrequentPattern.basic.SPPGrowth module

+
+
+

PAMI.stablePeriodicFrequentPattern.basic.SPPGrowthDump module

+
+
+

PAMI.stablePeriodicFrequentPattern.basic.abstract module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.stablePeriodicFrequentPattern.html b/sphinx/_build/html/PAMI.stablePeriodicFrequentPattern.html new file mode 100644 index 000000000..39c5253a6 --- /dev/null +++ b/sphinx/_build/html/PAMI.stablePeriodicFrequentPattern.html @@ -0,0 +1,216 @@ + + + + + + + PAMI.stablePeriodicFrequentPattern package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ + +
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.stablePeriodicFrequentPattern.topK.html b/sphinx/_build/html/PAMI.stablePeriodicFrequentPattern.topK.html new file mode 100644 index 000000000..dacfbf8cd --- /dev/null +++ b/sphinx/_build/html/PAMI.stablePeriodicFrequentPattern.topK.html @@ -0,0 +1,422 @@ + + + + + + + PAMI.stablePeriodicFrequentPattern.topK package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.stablePeriodicFrequentPattern.topK package

+
+

Submodules

+
+
+

PAMI.stablePeriodicFrequentPattern.topK.TSPIN module

+
+
+class PAMI.stablePeriodicFrequentPattern.topK.TSPIN.TSPIN(iFile, maxPer, maxLa, k, sep='\t')[source]
+

Bases: _stablePeriodicFrequentPatterns

+
+
Description:
+

TSPIN is an algorithm to discover the top-k stable periodic-frequent patterns in a transactional database. (A sketch of the top-k mechanism follows the reference below.)

+
+
Reference:
+

Fournier-Viger, P., Wang, Y., Yang, P. et al. TSPIN: mining top-k stable periodic patterns. +Appl Intell 52, 6917–6938 (2022). https://doi.org/10.1007/s10489-020-02181-6

+
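The top-k mechanism that TSPIN relies on can be illustrated independently of the tree structure: keep the k best patterns found so far in a min-heap and raise the internal support threshold to the weakest kept pattern, so weaker branches can be pruned. A minimal sketch, not PAMI's code; the function name registerCandidate is illustrative:

import heapq

def registerCandidate(heap, pattern, support, k):
    # Keep at most k patterns; return the current support threshold.
    if len(heap) < k:
        heapq.heappush(heap, (support, pattern))
    elif support > heap[0][0]:
        heapq.heapreplace(heap, (support, pattern))
    # Mining may prune any branch whose support bound is below this.
    return heap[0][0] if len(heap) == k else 1

heap = []
for pat, sup in [('a', 5), ('b', 9), ('ab', 7), ('c', 3)]:
    threshold = registerCandidate(heap, pat, sup, k=2)
print(sorted(heap, reverse=True))  # [(9, 'b'), (7, 'ab')]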
+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine complete set of frequent patterns

  • +
  • oFile – str : +Name of the output file to store complete set of frequent patterns

  • +
  • maxPer – int or float or str : +Maximum period allowed between any two consecutive occurrences of a pattern.

  • +
  • maxLa – int or float or str : +Maximum lability allowed for a pattern.

  • +
  • k – int : +The number of top stable periodic-frequent patterns to be discovered.

  • +
  • sep – str : +This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, users can override the default separator.

  • +
+
+
Attributes:
+
+
iFile : file

Name of the Input file or path of the input file

+
+
oFile : file

Name of the output file or path of the output file

+
+
maxPer : int or float or str

The user can specify maxPer either in count or proportion of database size. +If the program detects that the data type of maxPer is integer, it treats maxPer as expressed in count. +Otherwise, it will be treated as a proportion (float). +Example: maxPer=10 will be treated as a count, while maxPer=10.0 will be treated as a proportion

+
+
maxLa : int or float or str

The user can specify maxLa either in count or proportion of database size. +If the program detects that the data type of maxLa is integer, it treats maxLa as expressed in count. +Otherwise, it will be treated as a proportion (float). +Example: maxLa=10 will be treated as a count, while maxLa=10.0 will be treated as a proportion

+
+
sep : str

This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, users can override the default separator.

+
+
memoryUSS : float

To store the total amount of USS memory consumed by the program

+
+
memoryRSS : float

To store the total amount of RSS memory consumed by the program

+
+
startTime : float

To record the start time of the mining process

+
+
endTime : float

To record the completion time of the mining process

+
+
Database : list

To store the transactions of a database in list

+
+
mapSupport : Dictionary

To maintain the information of item and their frequency

+
+
lno : int

To represent the total no of transaction

+
+
tree : class

To represents the Tree class

+
+
itemSetCount : int

To represents the total no of patterns

+
+
finalPatterns : dict

To store the complete patterns

+
+
+
+
Methods:
+
+
startMine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of periodic-frequent patterns will be loaded into an output file

+
+
getPatternsAsDataFrame()

Complete set of periodic-frequent patterns will be loaded into a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
creatingItemSets(fileName)

Scans the dataset and stores in a list format

+
+
PeriodicFrequentOneItem()

Extracts the one-periodic-frequent patterns from database

+
+
updateDatabases()

Update the database by removing aperiodic items and sorting the Database items in decreasing order of support

+
+
buildTree()

After updating the Database, remaining items will be added into the tree by setting root node as null

+
+
convert()

to convert the user specified value

+
+
+
+
+
+

Methods to execute code on terminal

+
+
+
Format:
>>>   python3 TSPIN.py <inputFile> <outputFile> <maxPer> <maxLa> <k>
+
+
+
+
Example:
>>>  python3 TSPIN.py sampleTDB.txt patterns.txt 0.3 0.4 0.6
+
+
+
+

Note

+

maxPer, maxLa and k will be considered in percentage of database transactions

+
+
+
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.stablePeriodicFrequentPattern.topK import TSPIN as alg
+
+obj = alg.TSPIN(iFile, maxPer, maxLa, k)
+
+obj.startMine()
+
+stablePeriodicFrequentPatterns = obj.getPatterns()
+
+print("Total number of Periodic Frequent Patterns:", len(stablePeriodicFrequentPatterns))
+
+obj.save(oFile)
+
+Df = obj.getPatternsAsDataFrame()
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+

Credits:

+
+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns() dict[source]
+

Function to send the set of periodic-frequent patterns after completion of the mining process

+
+
Returns:
+

returning periodic-frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame() DataFrame[source]
+

Storing final periodic-frequent patterns in a dataframe

+
+
Returns:
+

returning periodic-frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+printResults() None[source]
+

This function is used to print the results

+
+ +
+
+save(outFile: str) None[source]
+

Complete set of periodic-frequent patterns will be loaded into an output file

+
+
Parameters:
+

outFile (file) – name of the output file

+
+
+
+ +
+
+startMine() None[source]
+

Mining process will start from this function

+
+ +
+
+ +
+
+

PAMI.stablePeriodicFrequentPattern.topK.abstract module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.subgraphMining.basic.html b/sphinx/_build/html/PAMI.subgraphMining.basic.html new file mode 100644 index 000000000..2336f3a1a --- /dev/null +++ b/sphinx/_build/html/PAMI.subgraphMining.basic.html @@ -0,0 +1,715 @@ + + + + + + + PAMI.subgraphMining.basic package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.subgraphMining.basic package

+
+

Submodules

+
+
+

PAMI.subgraphMining.basic.abstract module

+
+
+

PAMI.subgraphMining.basic.dfsCode module

+
+
+class PAMI.subgraphMining.basic.dfsCode.DFSCode[source]
+

Bases: object

+
+
+add(ee)[source]
+

The add function adds elements to the EE list while updating the rightmost element and path based on certain conditions.

+
+ +
+
+containEdge(v1, v2)[source]
+
+ +
+
+copy()[source]
+
+ +
+
+getAllVLabels()[source]
+

This function retrieves all vertex labels from the extended edge list and returns them in a list.

+
+ +
+
+getAt(index)[source]
+
+ +
+
+getEeList()[source]
+
+ +
+
+getRightMost()[source]
+
+ +
+
+getRightMostPath()[source]
+
+ +
+
+isEmpty()[source]
+
+ +
+
+notPreOfRm(v)[source]
+

This function checks if a given value is not the second-to-last element on the +rightMostPath given a vertex.

+
+ +
+
+onRightMostPath(v)[source]
+
+ +
+ +
+
+

PAMI.subgraphMining.basic.edge module

+
+
+class PAMI.subgraphMining.basic.edge.Edge(v1, v2, edgeLabel)[source]
+

Bases: object

+
+
+another(v)[source]
+
+ +
+
+getEdgeLabel()[source]
+
+ +
+ +
+
+

PAMI.subgraphMining.basic.extendedEdge module

+
+
+class PAMI.subgraphMining.basic.extendedEdge.ExtendedEdge(v1, v2, vLabel1, vLabel2, edgeLabel)[source]
+

Bases: object

+
+
+getEdgeLabel()[source]
+
+ +
+
+getV1()[source]
+
+ +
+
+getV2()[source]
+
+ +
+
+getVLabel1()[source]
+
+ +
+
+getVLabel2()[source]
+
+ +
+
+pairSmallerThan(x1, x2, y1, y2)[source]
+
+ +
+
+smallerThan(that)[source]
+
+ +
+
+smallerThanOriginal(that)[source]
+
+ +
+ +
+
+
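The two classes above can be combined to build a DFS code by hand. The following sketch uses only the constructors and accessors documented here; the vertex IDs and integer labels are arbitrary illustrative values, and the printed values depend on the internal representation:

from PAMI.subgraphMining.basic.dfsCode import DFSCode
from PAMI.subgraphMining.basic.extendedEdge import ExtendedEdge

code = DFSCode()
# ExtendedEdge(v1, v2, vLabel1, vLabel2, edgeLabel)
code.add(ExtendedEdge(0, 1, 10, 11, 1))  # forward edge 0 -> 1
code.add(ExtendedEdge(1, 2, 11, 12, 1))  # forward edge 1 -> 2

print(code.getRightMost())      # rightmost vertex of the code
print(code.getRightMostPath())  # rightmost path maintained by add()
print(code.containEdge(0, 1))   # whether the code contains edge (0, 1)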

PAMI.subgraphMining.basic.frequentSubgraph module

+
+
+class PAMI.subgraphMining.basic.frequentSubgraph.FrequentSubgraph(dfsCode, setOfGraphsIds, support)[source]
+

Bases: object

+
+ +
+
+

PAMI.subgraphMining.basic.graph module

+
+
+class PAMI.subgraphMining.basic.graph.Graph(id, vMap=None, dfsCode=None)[source]
+

Bases: object

+
+
+emptyIntegerArray = []
+
+ +
+
+emptyVertexList = []
+
+ +
+
+findAllWithLabel(targetLabel)[source]
+
+ +
+
+getAllNeighbors(v)[source]
+
+ +
+
+getAllVertices()[source]
+
+ +
+
+getEdge(v1, v2)[source]
+
+ +
+
+getEdgeCount()[source]
+
+ +
+
+getEdgeLabel(v1, v2)[source]
+
+ +
+
+getId()[source]
+
+ +
+
+getNonPrecalculatedAllVertices()[source]
+
+ +
+
+getVLabel(v)[source]
+
+ +
+
+isNeighboring(v1, v2)[source]
+
+ +
+
+precalculateLabelsToVertices()[source]
+

This function precalculates and stores mappings of vertex labels to their corresponding vertex IDs.

+
+ +
+
+precalculateVertexList()[source]
+

The function precalculateVertexList creates a list of vertices by iterating through a dictionary of +vertices.

+
+ +
+
+precalculateVertexNeighbors()[source]
+

The function precalculates the neighbors of each vertex in a graph and stores them in a cache.

+
+ +
+
+removeInfrequentLabel(label)[source]
+

The function removes vertices with a specific label from the graph and updates the edges accordingly.

+
+ +
+ +
+
+

PAMI.subgraphMining.basic.gspan module

+
+
+class PAMI.subgraphMining.basic.gspan.GSpan(iFile, minSupport, outputSingleVertices=True, maxNumberOfEdges=inf, outputGraphIds=False)[source]
+

Bases: _gSpan

+
+
+class Pair(x, y)[source]
+

Bases: object

+
+ +
+
+edge_count_pruning = True
+
+ +
+
+eliminate_infrequent_edge_labels = True
+
+ +
+
+eliminate_infrequent_vertex_pairs = True
+
+ +
+
+eliminate_infrequent_vertices = True
+
+ +
+
+findAllOnlyOneVertex(graphDb, outputFrequentVertices)[source]
+

The function findAllOnlyOneVertex iterates through a graph database to find frequent vertices +based on a minimum support threshold, storing the results and optionally removing infrequent +vertices.

+
+
Parameters:
+

graphDb – The graph database that the algorithm operates on.

+
+
+

outputFrequentVertices – A boolean flag that determines whether the frequent vertices should be included in the output or not.

+
+ +
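The single-vertex step can be pictured without the full miner: count in how many graphs each vertex label occurs and keep the labels that meet minSupport. An illustrative sketch, assuming the Graph and Vertex accessors documented in this package (getAllVertices(), getLabel(), getId()); the function name frequentVertexLabels is hypothetical:

from collections import defaultdict

def frequentVertexLabels(graphDb, minSupport):
    # Map each label to the set of graph ids containing it,
    # then keep only the labels with enough supporting graphs.
    labelToGraphIds = defaultdict(set)
    for g in graphDb:
        for v in g.getAllVertices():
            labelToGraphIds[v.getLabel()].add(g.getId())
    return {label: ids for label, ids in labelToGraphIds.items()
            if len(ids) >= minSupport}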
+
+gSpan(graphDb, outputFrequentVertices)[source]
+

The gSpan function in Python processes a graph database by precalculating vertex lists, removing +infrequent vertex pairs, and performing a depth-first search algorithm.

+
+
Parameters:
+

graphDb – The graph database that the algorithm operates on.

+
+
+

outputFrequentVertices – A boolean flag that determines whether the frequent vertices should be output or not.

+
+ +
+
+getFrequentSubgraphs()[source]
+
+ +
+
+getMemoryRSS()[source]
+
+ +
+
+getMemoryUSS()[source]
+
+ +
+
+getRuntime()[source]
+
+ +
+
+gspanDFS(c: DFSCode, graphDb, subgraphId)[source]
+

The gspanDFS function recursively explores graph patterns using the gSpan algorithm to find +frequent subgraphs in a graph database.

+
+
Parameters:
+

c (_ab.DFSCode) – The DFS code from which the depth-first traversal continues.

+
+
+

graphDb – The graph database that the algorithm operates on.

subgraphId – The ID of the specific subgraph within the graph database graphDb.

Returns: nothing; the method recursively explores the graph structure and appends every frequent subgraph found during the DFS traversal to self.frequentSubgraphs.

+
+ +
+
+isCanonical(c: DFSCode)[source]
+

The function isCanonical checks if a given DFS code is canonical by comparing it with its +rightmost path extensions.

+
+
Parameters:
+

c (_ab.DFSCode) – The parameter c is an instance of the _ab.DFSCode class

+
+
Returns:
+

a boolean value: True if the input DFSCode c is canonical, and False if it is not.

+
+ +
+
+readGraphs(path)[source]
+

The readGraphs function reads graph data from a file and constructs a list of graphs with vertices +and edges.

+
+
Parameters:
+

path – The file path to the text file containing the graph data to be read and processed.

+
+
+

Returns: a list of graph objects parsed from the specified file; each graph object contains the vertices and edges of one input graph.

+
+ +
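The expected input layout is the conventional gSpan text format; this is an assumption based on the standard gSpan datasets, so verify it against your own data. A line `t # <graphId>` starts a graph, `v <vertexId> <label>` declares a vertex, and `e <v1> <v2> <label>` declares an edge. For example:

t # 0
v 0 10
v 1 11
e 0 1 1
t # 1
v 0 10
v 1 11
e 0 1 1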
+
+removeInfrequentVertexPairs(graphDb)[source]
+

The function removeInfrequentVertexPairs processes a graph database by removing infrequent vertex +pairs and edge labels based on specified support thresholds.

+
+
Parameters:
+

graphDb – The graphDb parameter refers to a graph database that the algorithm is

+
+
+

operating on.

+
+ +
+
+rightMostPathExtensions(c: DFSCode, graphDb, graphIds)[source]
+

The function rightMostPathExtensions generates extensions for a given DFS code by considering +rightmost paths in a graph database.

+
+
Parameters:
+

c (_ab.DFSCode) – The DFS code whose rightmost path is to be extended.

+
+
+

graphDb – The graph database from which graphs are retrieved by their IDs.

graphIds – The list of graph identifiers on which the extensions are computed.

Returns: a dictionary mapping each extended edge to the set of graph IDs that support it.

+
+ +
+
+rightMostPathExtensionsFromSingle(c: DFSCode, g: Graph)[source]
+

The function rightMostPathExtensionsFromSingle generates extensions for a given DFS code and +graph, focusing on the rightmost path.

+
+
Parameters:
+

c (_ab.DFSCode) – The DFS code whose rightmost path is to be extended.

+
+
+

g – The graph object (vertices and edges) from which extensions of the DFS code c are generated.

Returns: a dictionary mapping each extended edge to the set of graph IDs that support it.

+
+ +
+
+save(oFile)[source]
+

The save function writes information about frequent subgraphs to a specified +output file in a specific format.

+
+
Parameters:
+

oFile – The file path where the results are written.

+
+
+

The method iterates over each frequent subgraph in self.frequentSubgraphs and writes the subgraph information to the file in a specific format.

+
+ +
+
+startMine()[source]
+

Run the gSpan algorithm.

+
+ +
+
+subgraphIsomorphisms(c: DFSCode, g: Graph)[source]
+

The function subgraphIsomorphisms takes a DFS code and a graph as input, and finds all subgraph +isomorphisms between the DFS code and the graph.

+
+
Parameters:
+

c (_ab.DFSCode) – The DFS code to match against the graph.

+
+
+

g – The graph in which subgraph isomorphisms of c are searched, starting from vertices whose labels match the first vertex of the code.

Returns: a list of dictionaries, each mapping vertex IDs of the DFS code to corresponding vertex IDs in the graph; every dictionary is one valid subgraph isomorphism.

+
+ +
+ +
+
+

PAMI.subgraphMining.basic.sparseTriangularMatrix module

+
+
+class PAMI.subgraphMining.basic.sparseTriangularMatrix.SparseTriangularMatrix[source]
+

Bases: object

+
+
+getSupportForItems(i, j)[source]
+
+ +
+
+incrementCount(i, j)[source]
+
+ +
+
+removeInfrequentEntriesFromMatrix(minsup)[source]
+
+ +
+
+setSupport(i, j, support)[source]
+
+ +
+ +
+
+
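A quick sketch of how this matrix can be used during preprocessing, relying only on the methods documented above; the integer labels are arbitrary illustrative values:

from PAMI.subgraphMining.basic.sparseTriangularMatrix import SparseTriangularMatrix

matrix = SparseTriangularMatrix()
# Count co-occurrences of vertex-label pairs seen on edges.
for a, b in [(10, 11), (10, 11), (10, 12)]:
    matrix.incrementCount(a, b)

print(matrix.getSupportForItems(10, 11))     # expected: 2
matrix.removeInfrequentEntriesFromMatrix(2)  # drops the (10, 12) entry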

PAMI.subgraphMining.basic.vertex module

+
+
+class PAMI.subgraphMining.basic.vertex.Vertex(id, vLabel)[source]
+

Bases: object

+
+
+addEdge(edge)[source]
+
+ +
+
+getEdgeList()[source]
+
+ +
+
+getId()[source]
+
+ +
+
+getLabel()[source]
+
+ +
+
+removeEdge(edgeToRemove)[source]
+
+ +
+ +
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.subgraphMining.html b/sphinx/_build/html/PAMI.subgraphMining.html new file mode 100644 index 000000000..62788fdc2 --- /dev/null +++ b/sphinx/_build/html/PAMI.subgraphMining.html @@ -0,0 +1,425 @@ + + + + + + + PAMI.subgraphMining package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.subgraphMining package

+
+

Subpackages

+
+ +
+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.subgraphMining.topK.html b/sphinx/_build/html/PAMI.subgraphMining.topK.html new file mode 100644 index 000000000..82733c1ee --- /dev/null +++ b/sphinx/_build/html/PAMI.subgraphMining.topK.html @@ -0,0 +1,663 @@ + + + + + + + PAMI.subgraphMining.topK package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.subgraphMining.topK package

+
+

Submodules

+
+
+

PAMI.subgraphMining.topK.DFSCode module

+
+
+class PAMI.subgraphMining.topK.DFSCode.DfsCode[source]
+

Bases: object

+
+
+add(ee)[source]
+
+ +
+
+containEdge(v1, v2)[source]
+
+ +
+
+copy()[source]
+
+ +
+
+getAllVLabels()[source]
+
+ +
+
+getAt(index)[source]
+
+ +
+
+getEeList()[source]
+
+ +
+
+getRightMost()[source]
+
+ +
+
+getRightMostPath()[source]
+
+ +
+
+isEmpty()[source]
+
+ +
+
+notPreOfRm(v)[source]
+
+ +
+
+onRightMostPath(v)[source]
+
+ +
+ +
+
+

PAMI.subgraphMining.topK.DFSThread module

+
+
+class PAMI.subgraphMining.topK.DFSThread.DfsThread(graphDb, candidates, minSup, tkgInstance)[source]
+

Bases: Thread

+
+
+run()[source]
+

Method representing the thread’s activity.

+

You may override this method in a subclass. The standard run() method +invokes the callable object passed to the object’s constructor as the +target argument, if any, with sequential and keyword arguments taken +from the args and kwargs arguments, respectively.

+
+ +
+ +
+
+

PAMI.subgraphMining.topK.abstract module

+
+
+

PAMI.subgraphMining.topK.edge module

+
+
+class PAMI.subgraphMining.topK.edge.Edge(v1, v2, edgeLabel)[source]
+

Bases: object

+
+
+another(v)[source]
+
+ +
+
+getEdgeLabel()[source]
+
+ +
+ +
+
+

PAMI.subgraphMining.topK.extendedEdge module

+
+
+class PAMI.subgraphMining.topK.extendedEdge.ExtendedEdge(v1, v2, vLabel1, vLabel2, edgeLabel)[source]
+

Bases: object

+
+
+getEdgeLabel()[source]
+
+ +
+
+getV1()[source]
+
+ +
+
+getV2()[source]
+
+ +
+
+getVLabel1()[source]
+
+ +
+
+getVLabel2()[source]
+
+ +
+
+pairSmallerThan(x1, x2, y1, y2)[source]
+
+ +
+
+smallerThan(that)[source]
+
+ +
+
+smallerThanOriginal(that)[source]
+
+ +
+ +
+
+

PAMI.subgraphMining.topK.frequentSubgraph module

+
+
+class PAMI.subgraphMining.topK.frequentSubgraph.FrequentSubgraph(dfsCode, setOfGraphsIds, support)[source]
+

Bases: object

+
+ +
+
+

PAMI.subgraphMining.topK.graph module

+
+
+class PAMI.subgraphMining.topK.graph.Graph(id, vMap=None, dfsCode=None)[source]
+

Bases: object

+
+
+EMPTY_INTEGER_ARRAY = []
+
+ +
+
+EMPTY_VERTEX_LIST = []
+
+ +
+
+findAllWithLabel(targetLabel)[source]
+
+ +
+
+getAllNeighbors(v)[source]
+
+ +
+
+getAllVertices()[source]
+
+ +
+
+getEdge(v1, v2)[source]
+
+ +
+
+getEdgeCount()[source]
+
+ +
+
+getEdgeLabel(v1, v2)[source]
+
+ +
+
+getId()[source]
+
+ +
+
+getNonPrecalculatedAllVertices()[source]
+
+ +
+
+getVLabel(v)[source]
+
+ +
+
+isNeighboring(v1, v2)[source]
+
+ +
+
+precalculateLabelsToVertices()[source]
+
+ +
+
+precalculateVertexList()[source]
+
+ +
+
+precalculateVertexNeighbors()[source]
+
+ +
+
+removeInfrequentLabel(label)[source]
+
+ +
+ +
+
+

PAMI.subgraphMining.topK.sparseTriangularMatrix module

+
+
+class PAMI.subgraphMining.topK.sparseTriangularMatrix.SparseTriangularMatrix[source]
+

Bases: object

+
+
+getSupportForItems(i, j)[source]
+
+ +
+
+incrementCount(i, j)[source]
+
+ +
+
+removeInfrequentEntriesFromMatrix(minsup)[source]
+
+ +
+
+setSupport(i, j, support)[source]
+
+ +
+ +
+
+

PAMI.subgraphMining.topK.tkg module

+
+
+class PAMI.subgraphMining.topK.tkg.TKG(iFile, k, maxNumberOfEdges=inf, outputSingleVertices=True, outputGraphIds=False)[source]
+

Bases: _TKG

+
+ +
+ +
+
+EDGE_COUNT_PRUNING = True
+
+ +
+
+ELIMINATE_INFREQUENT_EDGE_LABELS = True
+
+ +
+
+ELIMINATE_INFREQUENT_VERTEX_PAIRS = True
+
+ +
+
+ELIMINATE_INFREQUENT_VERTICES = True
+
+ +
+
+class Pair(x, y)[source]
+

Bases: object

+
+ +
+ +
+ +
+
+findAllOnlyOneVertex(graphDB, outputFrequentVertices)[source]
+
+ +
+
+gSpan(graphDB, outputFrequentVertices)[source]
+

The main gSpan function to find frequent subgraphs. +:param graphDb: The database of graphs to mine. +:param outputFrequentVertices: Boolean indicating whether to output single vertices as subgraphs.

+
+ +
+
+getKSubgraphs()[source]
+
+ +
+
+getMemoryRSS()[source]
+
+ +
+
+getMemoryUSS()[source]
+
+ +
+
+getMinSupport()[source]
+
+ +
+
+getQueueSize(queue)[source]
+
+ +
+
+getRuntime()[source]
+
+ +
+
+getSubgraphs()[source]
+

Creates a copy of the queue’s contents without emptying the original queue.

+
+ +
+
+gspanDfs(c: DfsCode, graphDB, subgraphId)[source]
+
+ +
+
+gspanDynamicDFS(c, graphDB, graphIds)[source]
+
+ +
+
+isCanonical(c: DfsCode)[source]
+
+ +
+
+readGraphs(path)[source]
+

The readGraphs function reads graph data from a file and constructs a list of graphs with vertices +and edges.

+
+
Parameters:
+

path – The file path to the text file containing the graph data; the method constructs a list of graphs represented by vertices and edges.

+
+
+

Returns: a list of _ab.Graph objects representing the graphs read from the file.

+
+ +
+
+registerAsCandidate(subgraph)[source]
+
+ +
+
+removeInfrequentVertexPairs(graphDB)[source]
+
+ +
+
+rightMostPathExtensions(c, graphDB, graphIds)[source]
+
+ +
+
+rightMostPathExtensionsFromSingle(c, g)[source]
+
+ +
+
+save(oFile)[source]
+

The save function writes subgraph information to a file in a specific format.

+
+
Parameters:
+

oFile – The file path where the output will be saved.

+
+
+

The method writes the subgraph information to the specified file in a specific format.

+
+ +
+
+savePattern(subgraph)[source]
+
+ +
+
+startMine()[source]
+

This Python function starts a mining process on a graph database, calculates runtime, pattern count, +and memory usage metrics.

+
+ +
+
+startThreads(graphDB, candidates, minSup)[source]
+
+ +
+
+subgraphIsomorphisms(c, g)[source]
+
+ +
+ +
+
+

PAMI.subgraphMining.topK.vertex module

+
+
+class PAMI.subgraphMining.topK.vertex.Vertex(id, vLabel)[source]
+

Bases: object

+
+
+addEdge(edge)[source]
+
+ +
+
+getEdgeList()[source]
+
+ +
+
+getId()[source]
+
+ +
+
+getLabel()[source]
+
+ +
+
+removeEdge(edgeToRemove)[source]
+
+ +
+ +
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.uncertainFaultTolerantFrequentPattern.html b/sphinx/_build/html/PAMI.uncertainFaultTolerantFrequentPattern.html new file mode 100644 index 000000000..192117a13 --- /dev/null +++ b/sphinx/_build/html/PAMI.uncertainFaultTolerantFrequentPattern.html @@ -0,0 +1,374 @@ + + + + + + + PAMI.uncertainFaultTolerantFrequentPattern package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.uncertainFaultTolerantFrequentPattern package

+
+

Submodules

+
+
+

PAMI.uncertainFaultTolerantFrequentPattern.VBFTMine module

+
+
+class PAMI.uncertainFaultTolerantFrequentPattern.VBFTMine.VBFTMine(iFile, minSup, itemSup, minLength, faultTolerance, sep='\t')[source]
+

Bases: _faultTolerantFrequentPatterns

+
+
Description:
+

VBFTMine is one of the fundamental algorithms to discover fault-tolerant frequent patterns in an uncertain transactional database, based on a bitset representation. This program employs the apriori (downward closure) property to reduce the search space effectively. (A sketch of the bit-vector counting follows the reference below.)

+
+
Reference:
+

Koh, JL., Yo, PW. (2005). An Efficient Approach for Mining Fault-Tolerant Frequent Patterns Based on Bit Vector Representations. +In: Zhou, L., Ooi, B.C., Meng, X. (eds) Database Systems for Advanced Applications. DASFAA 2005. Lecture Notes in Computer Science, +vol 3453. Springer, Berlin, Heidelberg. https://doi.org/10.1007/11408079_51

+
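The bit-vector idea behind VBFTMine can be illustrated compactly: represent each item's occurrences as a bitmask over transactions, and count a transaction toward a pattern's fault-tolerant support when it misses at most faultTolerance of the pattern's items. A minimal sketch, not PAMI's code, and ignoring the existential probabilities of an uncertain database:

def ftSupport(itemBitmaps, pattern, nTrans, faultTolerance):
    # Fault-tolerant support of `pattern` (a list of items).
    support = 0
    for t in range(nTrans):
        missing = sum(1 for item in pattern
                      if not (itemBitmaps[item] >> t) & 1)
        if missing <= faultTolerance:
            support += 1
    return support

# Transactions: t0={a,b}, t1={a,c}, t2={b,c}; bit t set => item present.
bitmaps = {'a': 0b011, 'b': 0b101, 'c': 0b110}
print(ftSupport(bitmaps, ['a', 'b', 'c'], nTrans=3, faultTolerance=1))  # 3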
+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine complete set of uncertain fault-tolerant frequent patterns

  • +
  • oFile – str : +Name of the output file to store complete set of uncertain fault-tolerant frequent patterns

  • +
  • minSup – float or int or str : +The user can specify minSup either in count or proportion of database size. +If the program detects that the data type of minSup is integer, it treats minSup as expressed in count. +Otherwise, it will be treated as a proportion (float). +Example: minSup=10 will be treated as a count, while minSup=10.0 will be treated as a proportion

  • +
  • itemSup – int or float : +Frequency of an item

  • +
  • minLength – int +minimum length of a pattern

  • +
  • faultTolerance – int : +The ability of a pattern mining algorithm to handle errors or inconsistencies in the data without completely failing or producing incorrect results.

  • +
  • sep – str : +This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, users can override the default separator.

  • +
+
+
Attributes:
+
+
startTime : float

To record the start time of the mining process

+
+
endTime : float

To record the completion time of the mining process

+
+
finalPatterns : dict

Storing the complete set of patterns in a dictionary variable

+
+
memoryUSS : float

To store the total amount of USS memory consumed by the program

+
+
memoryRSS : float

To store the total amount of RSS memory consumed by the program

+
+
Database : list

To store the transactions of a database in list

+
+
+
+
+
+

Executing the code on terminal:

+
Format:
+
+(.venv) $ python3 VBFTMine.py <inputFile> <outputFile> <minSup> <itemSup> <minLength> <faultTolerance>
+
+Example usage:
+
+(.venv) $ python3 VBFTMine.py sampleDB.txt patterns.txt 10.0 3.0 3 1
+
+
+        .. note:: minSup will be considered in count or proportion of database transactions
+
+
+
+
+

Sample run of the importing code:

+
import PAMI.uncertainFaultTolerantFrequentPattern.VBFTMine as alg
+
+obj = alg.VBFTMine(iFile, minSup, itemSup, minLength, faultTolerance)
+
+obj.startMine()
+
+faultTolerantFrequentPattern = obj.getPatterns()
+
+print("Total number of Fault Tolerant Frequent Patterns:", len(faultTolerantFrequentPattern))
+
+obj.save(oFile)
+
+Df = obj.getPatternsAsDataFrame()
+
+print("Total Memory in USS:", obj.getMemoryUSS())
+
+print("Total Memory in RSS", obj.getMemoryRSS())
+
+print("Total ExecutionTime in seconds:", obj.getRuntime())
+
+
+
+
+

Credits:

+
+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS()[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS()[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns()[source]
+

Function to send the set of frequent patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame()[source]
+

Storing final frequent patterns in a dataframe

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime()[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine()[source]
+

Frequent pattern mining process will start from here

+
+ +
+
+printResults()[source]
+

This function is used to print the results

+
+ +
+
+save(outFile)[source]
+

Complete set of frequent patterns will be loaded into an output file

+
+
Parameters:
+

outFile (file) – name of the output file

+
+
+
+ +
+
+startMine()[source]
+

Frequent pattern mining process will start from here

+
+ +
+
+ +
+
+

PAMI.uncertainFaultTolerantFrequentPattern.abstract module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.uncertainFrequentPattern.basic.html b/sphinx/_build/html/PAMI.uncertainFrequentPattern.basic.html new file mode 100644 index 000000000..b88cbed79 --- /dev/null +++ b/sphinx/_build/html/PAMI.uncertainFrequentPattern.basic.html @@ -0,0 +1,444 @@ + + + + + + + PAMI.uncertainFrequentPattern.basic package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.uncertainFrequentPattern.basic package

+
+

Submodules

+
+
+

PAMI.uncertainFrequentPattern.basic.CUFPTree module

+
+
+class PAMI.uncertainFrequentPattern.basic.CUFPTree.CUFPTree(iFile, minSup, sep='\t')[source]
+

Bases: _frequentPatterns

+
+
Description:
+

It is one of the fundamental algorithms to discover frequent patterns in an uncertain transactional database using a CUFP-Tree. (The expected-support measure used for uncertain databases is sketched after the reference below.)

+
+
Reference:
+

Chun-Wei Lin, Tzung-Pei Hong, 'A new mining approach for uncertain databases using CUFP trees', Expert Systems with Applications, Volume 39, Issue 4, March 2012, Pages 4084-4093, https://doi.org/10.1016/j.eswa.2011.09.087

+
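In an uncertain database each item carries an existential probability, and a pattern's expected support is the sum, over all transactions containing the pattern, of the product of its items' probabilities. A small illustrative sketch of that computation (not CUFPTree's internal code):

from math import prod

def expectedSupport(database, pattern):
    # database: list of {item: probability} transactions.
    return sum(prod(t[item] for item in pattern)
               for t in database
               if all(item in t for item in pattern))

db = [{'a': 0.9, 'b': 0.5}, {'a': 0.6}, {'a': 0.8, 'b': 1.0}]
print(expectedSupport(db, ['a', 'b']))  # 0.9*0.5 + 0.8*1.0 = 1.25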
+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine complete set of Uncertain Frequent Patterns

  • +
  • oFile – str : +Name of the output file to store complete set of Uncertain frequent patterns

  • +
  • minSup – int or float or str : +Minimum support threshold used to mine the patterns, specified either in count or proportion of database size

  • +
  • sep – str : +This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, users can override the default separator.

  • +
+
+
Attributes:
+
+
iFile : file

Name of the Input file or path of the input file

+
+
oFile : file

Name of the output file or path of the output file

+
+
minSup : float or int or str

The user can specify minSup either in count or proportion of database size. +If the program detects that the data type of minSup is integer, it treats minSup as expressed in count. +Otherwise, it will be treated as a proportion (float). +Example: minSup=10 will be treated as a count, while minSup=10.0 will be treated as a proportion

+
+
sep : str

This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, users can override the default separator.

+
+
memoryUSS : float

To store the total amount of USS memory consumed by the program

+
+
memoryRSS : float

To store the total amount of RSS memory consumed by the program

+
+
startTime : float

To record the start time of the mining process

+
+
endTime : float

To record the completion time of the mining process

+
+
Database : list

To store the transactions of a database in list

+
+
mapSupport : Dictionary

To maintain the information of item and their frequency

+
+
lno : int

To represent the total no of transaction

+
+
tree : class

To represents the Tree class

+
+
itemSetCount : int

To represents the total no of patterns

+
+
finalPatterns : dict

To store the complete patterns

+
+
+
+
Methods:
+
+
startMine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of frequent patterns will be loaded into an output file

+
+
getPatternsAsDataFrame()

Complete set of frequent patterns will be loaded into a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
creatingItemSets(fileName)

Scans the dataset and stores in a list format

+
+
frequentOneItem()

Extracts the one-length frequent patterns from database

+
+
updateTransactions()

Update the transactions by removing non-frequent items and sorting the Database items in decreasing order of support

+
+
buildTree()

After updating the Database, remaining items will be added into the tree by setting root node as null

+
+
convert()

to convert the user specified value

+
+
startMine()

Mining process will start from this function

+
+
+
+
+
+

Methods to execute code on terminal

+
Format:
+
+(.venv) $ python3 CUFPTree.py <inputFile> <outputFile> <minSup>
+
+Example Usage:
+
+(.venv) $ python3 CUFPTree.py sampleTDB.txt patterns.txt 3
+
+
+
+        .. note:: minSup  will be considered in support count or frequency
+
+
+
+
+

Importing this algorithm into a python program

+
from PAMI.uncertainFrequentPattern.basic import CUFPTree as alg
+
+obj = alg.CUFPTree(iFile, minSup)
+
+obj.startMine()
+
+frequentPatterns = obj.getPatterns()
+
+print("Total number of Frequent Patterns:", len(frequentPatterns))
+
+obj.save(oFile)
+
+Df = obj.getPatternsAsDataFrame()
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+

Credits:

+
+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns() dict[source]
+

Function to send the set of frequent patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame() DataFrame[source]
+

Storing final frequent patterns in a dataframe

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine() None[source]
+

Main method where the patterns are mined by constructing the tree and removing false patterns by counting the original support of the patterns. +:return: None

+
+ +
+
+printResults() None[source]
+

This function is used to print the results +:return: None

+
+ +
+
+save(outFile: str) None[source]
+

Complete set of frequent patterns will be loaded into an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
Returns:
+

None

+
+
+
+ +
+
+startMine() None[source]
+

Main method where the patterns are mined by constructing the tree and removing false patterns by counting the original support of the patterns. +:return: None

+
+ +
+
+ +
+
+

PAMI.uncertainFrequentPattern.basic.PUFGrowth module

+
+
+

PAMI.uncertainFrequentPattern.basic.TUFP module

+
+
+

PAMI.uncertainFrequentPattern.basic.TubeP module

+
+
+

PAMI.uncertainFrequentPattern.basic.TubeS module

+
+
+

PAMI.uncertainFrequentPattern.basic.UFGrowth module

+
+
+

PAMI.uncertainFrequentPattern.basic.UVECLAT module

+
+
+

PAMI.uncertainFrequentPattern.basic.abstract module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.uncertainFrequentPattern.html b/sphinx/_build/html/PAMI.uncertainFrequentPattern.html new file mode 100644 index 000000000..17a296b2d --- /dev/null +++ b/sphinx/_build/html/PAMI.uncertainFrequentPattern.html @@ -0,0 +1,200 @@ + + + + + + + PAMI.uncertainFrequentPattern package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ + +
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.uncertainGeoreferencedFrequentPattern.basic.html b/sphinx/_build/html/PAMI.uncertainGeoreferencedFrequentPattern.basic.html new file mode 100644 index 000000000..a5ed82066 --- /dev/null +++ b/sphinx/_build/html/PAMI.uncertainGeoreferencedFrequentPattern.basic.html @@ -0,0 +1,423 @@ + + + + + + + PAMI.uncertainGeoreferencedFrequentPattern.basic package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.uncertainGeoreferencedFrequentPattern.basic package

+
+

Submodules

+
+
+

PAMI.uncertainGeoreferencedFrequentPattern.basic.GFPGrowth module

+
+
+class PAMI.uncertainGeoreferencedFrequentPattern.basic.GFPGrowth.GFPGrowth(iFile, nFile, minSup, sep='\t')[source]
+

Bases: _frequentPatterns

+
+
Description:
+

GFPGrowth algorithm is used to discover geo-referenced frequent patterns in an uncertain transactional database using a GFP-Tree. (A sketch of the neighbor-file handling follows the reference below.)

+
+
Reference:
+

Palla Likhitha,Pamalla Veena, Rage, Uday Kiran, Koji Zettsu (2023). +“Discovering Geo-referenced Frequent Patterns in Uncertain Geo-referenced +Transactional Databases”. PAKDD 2023. +https://doi.org/10.1007/978-3-031-33380-4_3

+
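GFPGrowth additionally takes a neighbor file (nFile) listing, for each item, its spatially neighboring items; only patterns whose items are mutual neighbors are of interest. A hedged sketch of reading such a file and testing a candidate pattern follows; the exact file layout may differ in PAMI, and here each line is assumed to be an item followed by its neighbors, tab-separated:

def readNeighbors(nFile, sep='\t'):
    # Map each item to the set of its spatial neighbors.
    neighbors = {}
    with open(nFile) as f:
        for line in f:
            parts = line.strip().split(sep)
            if parts and parts[0]:
                neighbors[parts[0]] = set(parts[1:])
    return neighbors

def allNeighbors(pattern, neighbors):
    # True if every pair of distinct items in the pattern are neighbors.
    return all(b in neighbors.get(a, set())
               for a in pattern for b in pattern if a != b)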
+
Parameters:
+
    +
  • iFile – str : +Name of the Input file to mine complete set of uncertain Geo referenced Frequent Patterns

  • +
  • oFile – str : +Name of the output file to store complete set of Uncertain Geo referenced frequent patterns

  • +
  • minSup – str : +Minimum support threshold used to mine the patterns, specified either in count or proportion of database size

  • +
  • sep – str : +This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, users can override the default separator.

  • +
+
+
Attributes:
+
+
iFile : file

Name of the Input file or path of the input file

+
+
oFile : file

Name of the output file or path of the output file

+
+
minSup : float or int or str

The user can specify minSup either in count or proportion of database size. +If the program detects that the data type of minSup is integer, it treats minSup as expressed in count. +Otherwise, it will be treated as a proportion (float). +Example: minSup=10 will be treated as a count, while minSup=10.0 will be treated as a proportion

+
+
sep : str

This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, users can override the default separator.

+
+
memoryUSS : float

To store the total amount of USS memory consumed by the program

+
+
memoryRSS : float

To store the total amount of RSS memory consumed by the program

+
+
startTime : float

To record the start time of the mining process

+
+
endTime : float

To record the completion time of the mining process

+
+
Database : list

To store the transactions of a database in list

+
+
mapSupport : Dictionary

To maintain the information of item and their frequency

+
+
lnoint

To represent the total number of transactions

+
+
treeclass

To represent the Tree class

+
+
itemSetCountint

To represent the total number of patterns

+
+
finalPatternsdict

To store the complete patterns

+
+
+
+
Methods:
+
+
startMine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of frequent patterns will be loaded into an output file

+
+
getPatternsAsDataFrame()

Complete set of frequent patterns will be loaded into a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
creatingItemSets(fileName)

Scans the dataset and stores it in a list format

+
+
frequentOneItem()

Extracts the one-length frequent patterns from the database

+
+
updateTransactions()

Updates the transactions by removing non-frequent items and sorting the database in order of decreasing item support

+
+
buildTree()

After updating the database, the remaining items are added to the tree, with the root node set to null

+
+
convert()

To convert the user-specified value into a support count (see the sketch after this list)

+
+
startMine()

Mining process will start from this function

+
+
+
+
+
+
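The convert() step above is where the count-versus-proportion rule for minSup is applied. A minimal sketch of that logic (the standalone helper below is illustrative, not the class's actual internals):

def convert(value, dbSize):
    # An integer threshold is already a support count.
    if isinstance(value, int):
        return value
    # A float threshold is a proportion of the database size.
    if isinstance(value, float):
        return dbSize * value
    # A string is parsed and then handled by the same rule.
    if isinstance(value, str):
        return dbSize * float(value) if '.' in value else int(value)
    raise ValueError("minSup must be int, float, or str")

# With 100 transactions: convert(10, 100) -> 10, convert(0.1, 100) -> 10.0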

Executing the code on terminal:

+
Format:
+
+(.venv) $ python3 GFPGrowth.py <inputFile> <neighborFile> <outputFile> <minSup>
+
+Example usage:
+
+(.venv) $ python3 GFPGrowth.py sampleTDB.txt sampleNeighbor.txt patterns.txt 3
+
+
+        .. note:: minSup  will be considered in support count or frequency
+
+
+
+
+

Sample run of importing the code:

+
+
from PAMI.uncertainGeoreferencedFrequentPattern.basic import GFPGrowth as alg
+
+obj = alg.GFPGrowth(iFile, nFile, minSup)
+
+obj.startMine()
+
+Patterns = obj.getPatterns()
+
+print("Total number of  Patterns:", len(Patterns))
+
+obj.save(oFile)
+
+Df = obj.getPatternsAsDataFrame()
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+
+
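Continuing the sample above, the dataframe returned by getPatternsAsDataFrame() can be post-processed with pandas. A minimal sketch, assuming the usual PAMI column names 'Patterns' and 'Support' (as used by the writers later in this build):

Df = obj.getPatternsAsDataFrame()

# Keep only patterns whose support is at least 5, strongest first.
strong = Df[Df['Support'] >= 5].sort_values('Support', ascending=False)

print(strong.head(10))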

Credits:

+
+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS()[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS()[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns()[source]
+

Function to send the set of frequent patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame()[source]
+

Storing final frequent patterns in a dataframe

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime()[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine()[source]
+

Main method where patterns are mined by constructing the tree and false patterns are removed by counting the original support of each pattern

+
+ +
+
+printResults()[source]
+

To print all the stats

+
+ +
+
+save(outFile)[source]
+

Complete set of frequent patterns will be loaded into an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
+
+ +
+
+startMine()[source]
+

Main method where patterns are mined by constructing the tree and false patterns are removed by counting the original support of each pattern

+
+ +
+
+ +
+
+

PAMI.uncertainGeoreferencedFrequentPattern.basic.abstract module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.uncertainGeoreferencedFrequentPattern.html b/sphinx/_build/html/PAMI.uncertainGeoreferencedFrequentPattern.html new file mode 100644 index 000000000..46dbdf32d --- /dev/null +++ b/sphinx/_build/html/PAMI.uncertainGeoreferencedFrequentPattern.html @@ -0,0 +1,194 @@ + + + + + + + PAMI.uncertainGeoreferencedFrequentPattern package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+ + +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.uncertainPeriodicFrequentPattern.basic.html b/sphinx/_build/html/PAMI.uncertainPeriodicFrequentPattern.basic.html new file mode 100644 index 000000000..f278679f5 --- /dev/null +++ b/sphinx/_build/html/PAMI.uncertainPeriodicFrequentPattern.basic.html @@ -0,0 +1,696 @@ + + + + + + + PAMI.uncertainPeriodicFrequentPattern.basic package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.uncertainPeriodicFrequentPattern.basic package

+
+

Submodules

+
+
+

PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowth module

+
+
+class PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowth.UPFPGrowth(iFile, minSup, maxPer, sep='\t')[source]
+

Bases: _periodicFrequentPatterns

+
+
Description:
+

UPFPGrowth is an algorithm to discover periodic-frequent patterns in an uncertain temporal database.

+
+
Reference:
+

Uday Kiran, R., Likhitha, P., Dao, MS., Zettsu, K., Zhang, J. (2021). +Discovering Periodic-Frequent Patterns in Uncertain Temporal Databases. In: +Mantoro, T., Lee, M., Ayu, M.A., Wong, K.W., Hidayanto, A.N. (eds) Neural Information Processing. +ICONIP 2021. Communications in Computer and Information Science, vol 1516. Springer, Cham. +https://doi.org/10.1007/978-3-030-92307-5_83

+
+
Parameters:
+
    +
  • iFile – str : Name of the input file to mine the complete set of uncertain periodic-frequent patterns

  • +
  • oFile – str : Name of the output file to store the complete set of uncertain periodic-frequent patterns

  • +
  • minSup – float : The user-specified minimum support threshold, given either as a support count or as a proportion of the database size

  • +
  • sep – str : This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, users can override the default separator.

  • +
  • maxPer – float : The maximum periodicity threshold value specified by the user.

  • +
+
+
Attributes:
+
+
iFilefile

Name of the Input file or path of the input file

+
+
oFilefile

Name of the output file or path of output file

+
+
minSup: int or float or str

The user can specify minSup either as a count or as a proportion of the database size. If the program detects that the value of minSup is an integer, minSup is treated as a support count; otherwise it is treated as a proportion. Example: minSup=10 is treated as a count, while minSup=10.0 is treated as a proportion.

+
+
maxPer: int or float or str

The user can specify maxPer either as a count or as a proportion of the database size. If the program detects that the value of maxPer is an integer, maxPer is treated as a count; otherwise it is treated as a proportion. Example: maxPer=10 is treated as a count, while maxPer=10.0 is treated as a proportion. (A sketch of how maxPer bounds a pattern's periodicity follows this attribute list.)

+
+
sep: str

This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, users can override the default separator.

+
+
memoryUSS: float

To store the total amount of USS memory consumed by the program

+
+
memoryRSS: float

To store the total amount of RSS memory consumed by the program

+
+
startTime: float

To record the start time of the mining process

+
+
endTime: float

To record the completion time of the mining process

+
+
Databaselist

To store the transactions of a database in list

+
+
mapSupportDictionary

To maintain the information of item and their frequency

+
+
_lnoint

To represent the total number of transactions

+
+
treeclass

To represent the Tree class

+
+
finalPatternsdict

To store the complete patterns

+
+
+
+
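As referenced in the maxPer attribute above, the periodicity of a pattern is the largest gap between consecutive timestamps at which it occurs, including the stretches before its first and after its last occurrence. A minimal sketch under that standard definition (the helper and its arguments are illustrative):

def periodicity(timestamps, dbLast, dbFirst=0):
    # Include the gaps at both ends of the database.
    ts = [dbFirst] + sorted(timestamps) + [dbLast]
    return max(b - a for a, b in zip(ts, ts[1:]))

# A pattern seen at timestamps 2, 5 and 7 in a database spanning 0..10
# has periodicity max(2, 3, 2, 3) = 3, so it satisfies maxPer = 4.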
Methods:
+
+
startMine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of periodic-frequent patterns will be loaded into an output file

+
+
getPatternsAsDataFrame()

Complete set of periodic-frequent patterns will be loaded into a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
creatingItemSets()

Scans the dataset and stores it in a list format

+
+
PeriodicFrequentOneItem()

Extracts the one-length periodic-frequent patterns from the database

+
+
updateTransaction()

Updates the database by removing aperiodic items and sorting the database in order of decreasing item support

+
+
buildTree()

After updating the database, the remaining items are added to the tree, with the root node set to null

+
+
convert()

To convert the user-specified value

+
+
removeFalsePositives()

To remove the false positives in generated patterns

+
+
+
+
+
+

Executing the code on terminal:

+
Format:
+
+(.venv) $ python3 UPFPGrowth.py <inputFile> <outputFile> <minSup> <maxPer>
+
+Example Usage:
+
+(.venv) $ python3 UPFPGrowth.py sampleTDB.txt patterns.txt 0.3 4
+
+
+        .. note:: minSup and maxPer will be considered in support count or frequency
+
+
+
+
+

Importing this algorithm into a Python program

+
from PAMI.uncertainPeriodicFrequentPattern.basic import UPFPGrowth as alg
+
+obj = alg.UPFPGrowth(iFile, minSup, maxPer)
+
+obj.startMine()
+
+periodicFrequentPatterns = obj.getPatterns()
+
+print("Total number of Periodic Frequent Patterns:", len(periodicFrequentPatterns))
+
+obj.save(oFile)
+
+Df = obj.getPatternsAsDataFrame()
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+
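Per the getPatterns() signature documented below, each mined pattern maps to a list of floats. A hedged iteration sketch; the assumption that the list holds the expected support followed by the periodicity should be verified against the saved output:

periodicFrequentPatterns = obj.getPatterns()

for pattern, values in periodicFrequentPatterns.items():
    # Assumption: values = [expected support, periodicity]; check save() output.
    print(pattern.replace('\t', ' '), values)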

Credits:

+
+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns() Dict[str, List[float]][source]
+

Function to send the set of frequent patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame() DataFrame[source]
+

Storing final frequent patterns in a dataframe

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine() None[source]
+

Main method where patterns are mined by constructing the tree and false patterns are removed by counting the original support of each pattern. :return: None

+
+ +
+
+printResults() None[source]
+

This function is used to print the results

+
+ +
+
+save(outFile: str) None[source]
+

Complete set of frequent patterns will be loaded into an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
Returns:
+

None

+
+
+
+ +
+
+startMine() None[source]
+

Main method where patterns are mined by constructing the tree and false patterns are removed by counting the original support of each pattern. :return: None

+
+ +
+
+ +
+
+

PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowthPlus module

+
+
+class PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowthPlus.UPFPGrowthPlus(iFile, minSup, maxPer, sep='\t')[source]
+

Bases: _periodicFrequentPatterns

+
+
Description:
+

UPFPGrowthPlus is an algorithm to discover periodic-frequent patterns in an uncertain temporal database.

+
+
Reference:
+

Palla Likhitha, Rage Veena, Rage Uday Kiran, Koji Zettsu, Masashi Toyoda, Philippe Fournier-Viger (2023). UPFP-growth++: An Efficient Algorithm to Find Periodic-Frequent Patterns in Uncertain Temporal Databases. ICONIP 2022. Communications in Computer and Information Science, vol 1792. Springer, Singapore. https://doi.org/10.1007/978-981-99-1642-9_16

+
+
Parameters:
+
    +
  • iFile – str : Name of the input file to mine the complete set of uncertain periodic-frequent patterns

  • +
  • oFile – str : Name of the output file to store the complete set of uncertain periodic-frequent patterns

  • +
  • minSup – str : The user-specified minimum support threshold, given either as a support count or as a proportion of the database size

  • +
  • sep – str : This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, users can override the default separator.

  • +
  • maxPer – float : The maximum periodicity threshold value specified by the user.

  • +
+
+
Attributes:
+
+
iFile: file

Name of the Input file or path of input file

+
+
oFile: file

Name of the output file or path of output file

+
+
minSup: int or float or str

The user can specify minSup either as a count or as a proportion of the database size. If the program detects that the value of minSup is an integer, minSup is treated as a support count; otherwise it is treated as a proportion. Example: minSup=10 is treated as a count, while minSup=10.0 is treated as a proportion.

+
+
maxPer: int or float or str

The user can specify maxPer either as a count or as a proportion of the database size. If the program detects that the value of maxPer is an integer, maxPer is treated as a count; otherwise it is treated as a proportion. Example: maxPer=10 is treated as a count, while maxPer=10.0 is treated as a proportion.

+
+
sep: str

This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, users can override the default separator.

+
+
memoryUSS: float

To store the total amount of USS memory consumed by the program

+
+
memoryRSS: float

To store the total amount of RSS memory consumed by the program

+
+
startTime: float

To record the start time of the mining process

+
+
endTime: float

To record the completion time of the mining process

+
+
Database: list

To store the transactions of a database in list

+
+
mapSupport: Dictionary

To maintain the information of item and their frequency

+
+
lno: int

To represent the total number of transactions

+
+
tree: class

To represent the Tree class

+
+
itemSetCount: int

To represent the total number of patterns

+
+
finalPatterns: dict

To store the complete patterns

+
+
+
+
Methods:
+
+
startMine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of periodic-frequent patterns will be loaded into an output file

+
+
getPatternsAsDataFrame()

Complete set of periodic-frequent patterns will be loaded into a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
creatingItemSets(fileName)

Scans the dataset and stores it in a list format

+
+
updateDatabases()

Updates the database by removing aperiodic items and sorting the database in order of decreasing item support

+
+
buildTree()

After updating the database, the remaining items are added to the tree, with the root node set to null

+
+
convert()

To convert the user-specified value

+
+
PeriodicFrequentOneItems()

To extract the one-length periodic-frequent items

+
+
+
+
+
+

Executing the code on terminal:

+
Format:
+
+(.venv) $ python3 UPFPGrowthPlus.py <inputFile> <outputFile> <minSup> <maxPer>
+
+Example usage:
+
+(.venv) $ python3 UPFPGrowthPlus.py sampleTDB.txt patterns.txt 0.3 4
+
+
+        .. note:: minSup and maxPer will be considered in support count or frequency
+
+
+
+
+

Importing this algorithm into a Python program

+
from PAMI.uncertainPeriodicFrequentPattern.basic import UPFPGrowthPlus as alg
+
+obj = alg.UPFPGrowthPlus(iFile, minSup, maxPer)
+
+obj.startMine()
+
+periodicFrequentPatterns = obj.getPatterns()
+
+print("Total number of uncertain Periodic Frequent Patterns:", len(periodicFrequentPatterns))
+
+obj.save(oFile)
+
+Df = obj.getPatternsAsDataFrame()
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+
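Elsewhere in this build (see the ARWithConfidence source near the end of this diff) startMine() carries a deprecation notice in favour of mine(); assuming the same policy applies to UPFPGrowthPlus, whose mine() is documented below, the call above can be written as:

obj = alg.UPFPGrowthPlus(iFile, minSup, maxPer)

obj.mine()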

Credits:

+
+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS()[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS()[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function. +:return: returning USS memory consumed by the mining process +:rtype: float

+
+ +
+
+getPatterns()[source]
+

Function to send the set of frequent patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame()[source]
+

Storing final frequent patterns in a dataframe

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime()[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine()[source]
+

Main method where patterns are mined by constructing the tree and false patterns are removed by counting the original support of each pattern

+
+ +
+
+printResults()[source]
+

This function is used to print the results

+
+ +
+
+save(outFile)[source]
+

Complete set of frequent patterns will be loaded into an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
+
+ +
+
+startMine()[source]
+

Main method where patterns are mined by constructing the tree and false patterns are removed by counting the original support of each pattern

+
+ +
+
+ +
+
+PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowthPlus.printTree(root)[source]
+

To print the tree, showing each node's item name, probability, timestamps, and second probability.

+

Attributes:

+
+
Parameters:
+

root – Node

+
+
Returns:
+

Prints the whole tree, showing each node's item, probability, parent item, timestamps, and second probability.

+
+
+
+ +
+
+
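printTree(root) is documented only by its output. A minimal recursive sketch of such a traversal, under the assumption of children/item/probability/timeStamps/secondProbability node attributes (illustrative names, not the module's actual internals):

def printTree(root, level=0):
    # Depth-first walk, indenting each node by its depth in the tree.
    for child in root.children.values():
        print('  ' * level, child.item, child.probability,
              child.timeStamps, child.secondProbability)
        printTree(child, level + 1)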

PAMI.uncertainPeriodicFrequentPattern.basic.abstract module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.uncertainPeriodicFrequentPattern.html b/sphinx/_build/html/PAMI.uncertainPeriodicFrequentPattern.html new file mode 100644 index 000000000..c34799227 --- /dev/null +++ b/sphinx/_build/html/PAMI.uncertainPeriodicFrequentPattern.html @@ -0,0 +1,210 @@ + + + + + + + PAMI.uncertainPeriodicFrequentPattern package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ + +
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.weightedFrequentNeighbourhoodPattern.basic.html b/sphinx/_build/html/PAMI.weightedFrequentNeighbourhoodPattern.basic.html new file mode 100644 index 000000000..ae63a1d26 --- /dev/null +++ b/sphinx/_build/html/PAMI.weightedFrequentNeighbourhoodPattern.basic.html @@ -0,0 +1,422 @@ + + + + + + + PAMI.weightedFrequentNeighbourhoodPattern.basic package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.weightedFrequentNeighbourhoodPattern.basic package

+
+

Submodules

+
+
+

PAMI.weightedFrequentNeighbourhoodPattern.basic.SWFPGrowth module

+
+
+class PAMI.weightedFrequentNeighbourhoodPattern.basic.SWFPGrowth.SWFPGrowth(iFile: str | DataFrame, nFile: str | DataFrame, minWS: int | float | str, sep='\t')[source]
+

Bases: _weightedFrequentSpatialPatterns

+
+
Description:
+

SWFPGrowth is an algorithm to mine weighted spatial frequent patterns in spatiotemporal databases.

+
+
Reference:
+

R. Uday Kiran, P. P. C. Reddy, K. Zettsu, M. Toyoda, M. Kitsuregawa and P. Krishna Reddy, +“Discovering Spatial Weighted Frequent Itemsets in Spatiotemporal Databases,” 2019 International +Conference on Data Mining Workshops (ICDMW), 2019, pp. 987-996, doi: 10.1109/ICDMW.2019.00143.

+
+
Parameters:
+
    +
  • iFile – str : Name of the input file to mine the complete set of weighted frequent neighbourhood patterns.

  • +
  • oFile – str : Name of the output file to store the complete set of weighted frequent neighbourhood patterns.

  • +
  • minWS – int or float or str : The user-specified minimum weighted support threshold, given either as a support count or as a proportion of the database size (named minWS in the constructor above)

  • +
  • sep – str : This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, users can override the default separator.

  • +
  • maxPer – float : The maximum periodicity threshold value specified by the user.

  • +
+
+
Attributes:
+
+
iFilefile

Input file name or path of the input file

+
+
minWS: float or int or str

The user can specify minWS either as a count or as a proportion of the database size. If the program detects that the value of minWS is an integer, minWS is treated as a support count; otherwise it is treated as a proportion. Example: minWS=10 is treated as a count, while minWS=10.0 is treated as a proportion.

+
+
minWeight: float or int or str

The user can specify minWeight either as a count or as a proportion of the database size. If the program detects that the value of minWeight is an integer, minWeight is treated as a count; otherwise it is treated as a proportion. Example: minWeight=10 is treated as a count, while minWeight=10.0 is treated as a proportion.

+
+
sepstr

This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, users can override the default separator.

+
+
oFilefile

Name of the output file or the path of the output file

+
+
startTime:float

To record the start time of the mining process

+
+
endTime:float

To record the completion time of the mining process

+
+
memoryUSSfloat

To store the total amount of USS memory consumed by the program

+
+
memoryRSSfloat

To store the total amount of RSS memory consumed by the program

+
+
Databaselist

To store the transactions of a database in list

+
+
mapSupportDictionary

To maintain the information of item and their frequency

+
+
lnoint

It represents the total number of transactions

+
+
treeclass

It represents the Tree class

+
+
finalPatternsdict

It stores the mined patterns

+
+
+
+
+

Methods:

+
+
+
startMine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of frequent patterns will be loaded into an output file

+
+
getPatternsAsDataFrame()

Complete set of frequent patterns will be loaded into a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
creatingItemSets()

Scans the dataset or dataframes and stores them in list format

+
+
frequentOneItem()

Extracts the one-length frequent patterns from the transactions

+
+
+
+
+

Methods to execute code on terminal

+
Format:
+
+(.venv) $ python3 SWFPGrowth.py <inputFile> <weightFile> <outputFile> <minSup> <minWeight>
+
+Example usage:
+
+(.venv) $ python3 SWFPGrowth.py sampleDB.txt weightFile.txt patterns.txt 10  2
+
+
+        .. note:: minSup will be considered in support count or frequency
+
+
+
+
+

Importing this algorithm into a Python program

+
from PAMI.weightedFrequentNeighbourhoodPattern.basic import SWFPGrowth as alg
+
+obj = alg.SWFPGrowth(iFile, nFile, minWS, sep)
+
+obj.startMine()
+
+frequentPatterns = obj.getPatterns()
+
+print("Total number of Frequent Patterns:", len(frequentPatterns))
+
+obj.save(oFile)
+
+Df = obj.getPatternsAsDataFrame()
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+
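Per the constructor signature above, iFile and nFile may be pandas DataFrames rather than file paths. A hedged sketch; the 'Transactions' column name is an assumption about PAMI's dataframe readers, so verify it against the abstract module:

import pandas as pd
from PAMI.weightedFrequentNeighbourhoodPattern.basic import SWFPGrowth as alg

# Assumption: one transaction per row, items joined by the chosen separator.
df = pd.DataFrame({'Transactions': ['a\tb\tc', 'a\tc', 'b\tc\td']})

obj = alg.SWFPGrowth(df, 'sampleNeighbor.txt', minWS=2, sep='\t')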

Credits:

+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns() Dict[str, float][source]
+

Function to send the set of frequent patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame() DataFrame[source]
+

Storing final frequent patterns in a dataframe

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine() None[source]
+

Main program to start the mining operation. :return: None

+
+ +
+
+printResults() None[source]
+

This function is used to print the results +:return: None

+
+ +
+
+save(outFile: str) None[source]
+

Complete set of frequent patterns will be loaded into an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
Returns:
+

None

+
+
+
+ +
+
+startMine() None[source]
+

Main program to start the mining operation. :return: None

+
+ +
+
+ +
+
+

PAMI.weightedFrequentNeighbourhoodPattern.basic.abstract module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.weightedFrequentNeighbourhoodPattern.html b/sphinx/_build/html/PAMI.weightedFrequentNeighbourhoodPattern.html new file mode 100644 index 000000000..7dd6c7d00 --- /dev/null +++ b/sphinx/_build/html/PAMI.weightedFrequentNeighbourhoodPattern.html @@ -0,0 +1,194 @@ + + + + + + + PAMI.weightedFrequentNeighbourhoodPattern package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+ + +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.weightedFrequentPattern.basic.html b/sphinx/_build/html/PAMI.weightedFrequentPattern.basic.html new file mode 100644 index 000000000..1302ec145 --- /dev/null +++ b/sphinx/_build/html/PAMI.weightedFrequentPattern.basic.html @@ -0,0 +1,429 @@ + + + + + + + PAMI.weightedFrequentPattern.basic package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.weightedFrequentPattern.basic package

+
+

Submodules

+
+
+

PAMI.weightedFrequentPattern.basic.WFIM module

+
+
+class PAMI.weightedFrequentPattern.basic.WFIM.WFIM(iFile: str, wFile: str, minSup: str, minWeight: int, sep: str = '\t')[source]
+

Bases: _weightedFrequentPatterns

+
+
Description:
+
    +
  • WFIM is one of the fundamental algorithms to discover weighted frequent patterns in a transactional database.

  • +
  • It stores the database in a compressed FP-tree, decreasing memory usage, and extracts the patterns from the tree. It employs the downward closure property to reduce the search space effectively.

  • +
+
+
+
+
Reference:

U. Yun and J. J. Leggett, “Wfim: weighted frequent itemset mining with a weight range and a minimum weight,” +in Proceedings of the 2005 SIAM International Conference on Data Mining. SIAM, 2005, pp. 636–640. +https://epubs.siam.org/doi/pdf/10.1137/1.9781611972757.76

+
+
+
+
Parameters:
+
    +
  • iFile – str : Name of the input file to mine the complete set of weighted frequent patterns.

  • +
  • oFile – str : Name of the output file to store the complete set of weighted frequent patterns.

  • +
  • minSup – str or int or float : The user-specified minimum support threshold, given either as a support count or as a proportion of the database size

  • +
  • sep – str : This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, users can override the default separator.

  • +
+
+
+

Attributes:

+
+
+
iFilefile

Input file name or path of the input file

+
+
minSup: float or int or str

The user can specify minSup either as a count or as a proportion of the database size. If the program detects that the value of minSup is an integer, minSup is treated as a support count; otherwise it is treated as a proportion. Example: minSup=10 is treated as a count, while minSup=10.0 is treated as a proportion.

+
+
minWeight: float or int or str

The user can specify minWeight either as a count or as a proportion of the database size. If the program detects that the value of minWeight is an integer, minWeight is treated as a count; otherwise it is treated as a proportion. Example: minWeight=10 is treated as a count, while minWeight=10.0 is treated as a proportion.

+
+
sepstr

This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, users can override the default separator.

+
+
oFilefile

Name of the output file or the path of the output file

+
+
startTime:float

To record the start time of the mining process

+
+
endTime:float

To record the completion time of the mining process

+
+
memoryUSSfloat

To store the total amount of USS memory consumed by the program

+
+
memoryRSSfloat

To store the total amount of RSS memory consumed by the program

+
+
Databaselist

To store the transactions of a database in list

+
+
mapSupportDictionary

To maintain the information of item and their frequency

+
+
lnoint

It represents the total number of transactions

+
+
treeclass

It represents the Tree class

+
+
finalPatternsdict

It stores the mined patterns

+
+
+
+

Methods:

+
+
+
startMine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of frequent patterns will be loaded into an output file

+
+
getPatternsAsDataFrame()

Complete set of frequent patterns will be loaded into a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
creatingItemSets()

Scans the dataset or dataframes and stores them in list format

+
+
frequentOneItem()

Extracts the one-length frequent patterns from the transactions

+
+
+
+
+

Methods to execute code on terminal

+
Format:
+
+(.venv) $ python3 WFIM.py <inputFile> <weightFile> <outputFile> <minSup> <minWeight>
+
+Example Usage:
+
+(.venv) $ python3 WFIM.py sampleDB.txt weightSample.txt patterns.txt 10.0 3.4
+
+
+        .. note:: minSup and minWeight will be considered in support count or frequency
+
+
+
+
+

Importing this algorithm into a Python program

+
from PAMI.weightedFrequentPattern.basic import WFIM as alg
+
+obj = alg.WFIM(iFile, wFile, minSup, minWeight)
+
+obj.startMine()
+
+frequentPatterns = obj.getPatterns()
+
+print("Total number of Frequent Patterns:", len(frequentPatterns))
+
+obj.save(oFile)
+
+Df = obj.getPatternsAsDataFrame()
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+
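The count and proportion forms of minSup described above translate into two equivalent calls when the database holds, say, 100 transactions. A minimal sketch (file names are placeholders):

from PAMI.weightedFrequentPattern.basic import WFIM as alg

# Count form: a pattern must appear in at least 10 transactions.
obj1 = alg.WFIM('sampleDB.txt', 'weightSample.txt', 10, 3)

# Proportion form: 0.1 of 100 transactions gives the same threshold of 10.
obj2 = alg.WFIM('sampleDB.txt', 'weightSample.txt', 0.1, 3)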

Credits:

+
+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function.

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns() Dict[str, int][source]
+

Function to send the set of frequent patterns after completion of the mining process.

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame() DataFrame[source]
+

Storing final frequent patterns in a dataframe.

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process.

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine() None[source]
+

Main program to start the mining operation. :return: None

+
+ +
+
+printResults() None[source]
+

This function is used to print the results +:return: None

+
+ +
+
+save(outFile: str) None[source]
+

Complete set of frequent patterns will be loaded into an output file.

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
Returns:
+

None

+
+
+
+ +
+
+startMine() None[source]
+

Main program to start the mining operation. :return: None

+
+ +
+
+ +
+
+

PAMI.weightedFrequentPattern.basic.abstract module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.weightedFrequentPattern.html b/sphinx/_build/html/PAMI.weightedFrequentPattern.html new file mode 100644 index 000000000..4670ce1fd --- /dev/null +++ b/sphinx/_build/html/PAMI.weightedFrequentPattern.html @@ -0,0 +1,194 @@ + + + + + + + PAMI.weightedFrequentPattern package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+ + +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.weightedFrequentRegularPattern.basic.html b/sphinx/_build/html/PAMI.weightedFrequentRegularPattern.basic.html new file mode 100644 index 000000000..752111722 --- /dev/null +++ b/sphinx/_build/html/PAMI.weightedFrequentRegularPattern.basic.html @@ -0,0 +1,422 @@ + + + + + + + PAMI.weightedFrequentRegularPattern.basic package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.weightedFrequentRegularPattern.basic package

+
+

Submodules

+
+
+

PAMI.weightedFrequentRegularPattern.basic.WFRIMiner module

+
+
+class PAMI.weightedFrequentRegularPattern.basic.WFRIMiner.WFRIMiner(iFile, _wFile, WS, regularity, sep='\t')[source]
+

Bases: _weightedFrequentRegularPatterns

+
+
Description:
+

WFRIMiner is one of the fundamental algorithms to discover weighted frequent regular patterns in a transactional database. It stores the database in a compressed WFRI-tree, decreasing memory usage, and extracts the patterns from the tree. It employs the downward closure property to reduce the search space effectively.

+
+
Reference:
+

K. Klangwisan and K. Amphawan, “Mining weighted-frequent-regular itemsets from transactional database,” +2017 9th International Conference on Knowledge and Smart Technology (KST), 2017, pp. 66-71, +doi: 10.1109/KST.2017.7886090.

+
+
Parameters:
+
    +
  • iFile – str : Name of the input file to mine the complete set of weighted frequent regular patterns.

  • +
  • oFile – str : Name of the output file to store the complete set of weighted frequent regular patterns.

  • +
  • sep – str : This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, users can override the default separator.

  • +
  • wFile – str : Name of the weight file.

  • +
+
+
Attributes:
+
+
iFilefile

Input file name or path of the input file

+
+
WS: float or int or str

The user can specify WS either as a count or as a proportion of the database size. If the program detects that the value of WS is an integer, WS is treated as a count; otherwise it is treated as a proportion. Example: WS=10 is treated as a count, while WS=10.0 is treated as a proportion.

+
+
regularity: float or int or str

The user can specify regularity either as a count or as a proportion of the database size. If the program detects that the value of regularity is an integer, regularity is treated as a count; otherwise it is treated as a proportion. Example: regularity=10 is treated as a count, while regularity=10.0 is treated as a proportion.

+
+
sepstr

This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, users can override the default separator.

+
+
oFilefile

Name of the output file or the path of the output file

+
+
startTime:float

To record the start time of the mining process

+
+
endTime:float

To record the completion time of the mining process

+
+
memoryUSSfloat

To store the total amount of USS memory consumed by the program

+
+
memoryRSSfloat

To store the total amount of RSS memory consumed by the program

+
+
Databaselist

To store the transactions of a database in list

+
+
mapSupportDictionary

To maintain the information of item and their frequency

+
+
lnoint

It represents the total number of transactions

+
+
treeclass

It represents the Tree class

+
+
finalPatternsdict

It stores the mined patterns

+
+
+
+
Methods:
+
+
startMine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of frequent patterns will be loaded into an output file

+
+
getPatternsAsDataFrame()

Complete set of frequent patterns will be loaded into a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
creatingItemSets()

Scans the dataset or dataframes and stores them in list format

+
+
frequentOneItem()

Extracts the one-length frequent patterns from the transactions

+
+
+
+
+
+

Methods to execute code on terminal

+
Format:
+
+(.venv) $ python3 WFRIMiner.py <inputFile> <outputFile> <weightSupport> <regularity>
+
+Example Usage:
+
+(.venv) $ python3 WFRIMiner.py sampleDB.txt patterns.txt 10 5
+
+
+        .. note:: WS & regularity will be considered in support count or frequency
+
+
+
+
+

Importing this algorithm into a Python program

+
from PAMI.weightedFrequentRegularPattern.basic import WFRIMiner as alg
+
+obj = alg.WFRIMiner(iFile, WS, regularity)
+
+obj.startMine()
+
+weightedFrequentRegularPatterns = obj.getPatterns()
+
+print("Total number of Frequent Patterns:", len(weightedFrequentRegularPatterns))
+
+obj.save(oFile)
+
+Df = obj.getPatternsAsDataFrame()
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
+
+
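save() writes one pattern per line. Judging by the writers visible later in this build (see the ARWithConfidence source), the line format is the tab-separated items, a colon, and the value, so a hedged parsing sketch for the saved file is:

patterns = {}
with open('patterns.txt', 'r', encoding='utf-8') as f:
    for line in f:
        # Assumption: 'item1\titem2...:value' per line, as in other PAMI writers.
        items, value = line.strip().rsplit(':', 1)
        patterns[items.strip()] = float(value)

print(len(patterns), 'patterns loaded')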

Credits:

+
+

The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.

+
+
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning RSS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
Returns:
+

returning USS memory consumed by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+getPatterns() Dict[str, float][source]
+

Function to send the set of frequent patterns after completion of the mining process

+
+
Returns:
+

returning frequent patterns

+
+
Return type:
+

dict

+
+
+
+ +
+
+getPatternsAsDataFrame() DataFrame[source]
+

Storing final frequent patterns in a dataframe

+
+
Returns:
+

returning frequent patterns in a dataframe

+
+
Return type:
+

pd.DataFrame

+
+
+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process

+
+
Returns:
+

returning total amount of runtime taken by the mining process

+
+
Return type:
+

float

+
+
+
+ +
+
+mine() None[source]
+

Main program to start the mining operation. :return: None

+
+ +
+
+printResults() None[source]
+

This function is used to print the results +:return: None

+
+ +
+
+save(outFile: str) None[source]
+

Complete set of frequent patterns will be loaded into an output file

+
+
Parameters:
+

outFile (csv file) – name of the output file

+
+
Returns:
+

None

+
+
+
+ +
+
+startMine() None[source]
+

Main program to start the mining operation. :return: None

+
+ +
+
+ +
+
+

PAMI.weightedFrequentRegularPattern.basic.abstract module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.weightedFrequentRegularPattern.html b/sphinx/_build/html/PAMI.weightedFrequentRegularPattern.html new file mode 100644 index 000000000..2d37542cd --- /dev/null +++ b/sphinx/_build/html/PAMI.weightedFrequentRegularPattern.html @@ -0,0 +1,194 @@ + + + + + + + PAMI.weightedFrequentRegularPattern package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+ + +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.weightedUncertainFrequentPattern.basic.html b/sphinx/_build/html/PAMI.weightedUncertainFrequentPattern.basic.html new file mode 100644 index 000000000..bf8d2a049 --- /dev/null +++ b/sphinx/_build/html/PAMI.weightedUncertainFrequentPattern.basic.html @@ -0,0 +1,388 @@ + + + + + + + PAMI.weightedUncertainFrequentPattern.basic package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

PAMI.weightedUncertainFrequentPattern.basic package

+
+

Submodules

+
+
+

PAMI.weightedUncertainFrequentPattern.basic.WUFIM module

+
+
+class PAMI.weightedUncertainFrequentPattern.basic.WUFIM.WUFIM(iFile, wFile, expSup, expWSup, sep='\t')[source]
+

Bases: _weightedFrequentPatterns

+
+
Description:
+

It is one of the algorithms to discover weighted frequent patterns in an uncertain transactional database using a PUF-Tree.

+
+
Reference:
+

Chun-Wei Jerry Lin, Wensheng Gan, Philippe Fournier-Viger, Tzung-Pei Hong: Efficient Mining of Weighted Frequent Itemsets in Uncertain Databases. In: Machine Learning and Data Mining in Pattern Recognition.

+
+
Parameters:
+
    +
  • iFile – str : Name of the input file to mine the complete set of weighted uncertain frequent patterns

  • +
  • oFile – str : Name of the output file to store the complete set of weighted uncertain frequent patterns

  • +
  • minSup – str : The user-specified minimum support threshold, given either as a support count or as a proportion of the database size

  • +
  • sep – str : This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, users can override the default separator.

  • +
  • wFile – str : Name of the weight file.

  • +
+
+
Attributes:
+
+
iFilefile

Name of the Input file or path of the input file

+
+
wFilefile

Name of the Input file or path of the input file

+
+
oFilefile

Name of the output file or path of the output file

+
+
minSupfloat or int or str

The user can specify minSup either as a count or as a proportion of the database size. If the program detects that the value of minSup is an integer, minSup is treated as a support count; otherwise it is treated as a proportion. Example: minSup=10 is treated as a count, while minSup=10.0 is treated as a proportion.

+
+
sepstr

This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, users can override the default separator.

+
+
memoryUSSfloat

To store the total amount of USS memory consumed by the program

+
+
memoryRSSfloat

To store the total amount of RSS memory consumed by the program

+
+
startTime:float

To record the start time of the mining process

+
+
endTime:float

To record the completion time of the mining process

+
+
Databaselist

To store the transactions of a database in list

+
+
mapSupportDictionary

To maintain the information of item and their frequency

+
+
lnoint

To represent the total number of transactions

+
+
treeclass

To represent the Tree class

+
+
itemSetCountint

To represent the total number of patterns

+
+
finalPatternsdict

To store the complete patterns

+
+
+
+
Methods:
+
+
startMine()

Mining process will start from here

+
+
getPatterns()

Complete set of patterns will be retrieved with this function

+
+
save(oFile)

Complete set of frequent patterns will be loaded into an output file

+
+
getPatternsAsDataFrame()

Complete set of frequent patterns will be loaded into a dataframe

+
+
getMemoryUSS()

Total amount of USS memory consumed by the mining process will be retrieved from this function

+
+
getMemoryRSS()

Total amount of RSS memory consumed by the mining process will be retrieved from this function

+
+
getRuntime()

Total amount of runtime taken by the mining process will be retrieved from this function

+
+
creatingItemSets(fileName)

Scans the dataset and stores it in a list format

+
+
frequentOneItem()

Extracts the one-length frequent patterns from the database

+
+
updateTransactions()

Updates the transactions by removing non-frequent items and sorting the database in order of decreasing item support

+
+
buildTree()

After updating the database, the remaining items are added to the tree, with the root node set to null

+
+
convert()

To convert the user-specified value

+
+
startMine()

Mining process will start from this function

+
+
+
+
+
+

Methods to execute code on terminal

+
Format:
+
+(.venv) $ python3 WUFIM.py <inputFile> <outputFile> <minSup>
+
+Example Usage:
+
+(.venv) $ python3 WUFIM.py sampleTDB.txt patterns.txt 3
+
+
+        .. note:: minSup  will be considered in support count or frequency
+
+
+
+
+

Importing this algorithm into a Python program

+
from PAMI.weightedUncertainFrequentPattern.basic import WUFIM as alg
+
+obj = alg.WUFIM(iFile, wFile, expSup, expWSup)
+
+obj.startMine()
+
+Patterns = obj.getPatterns()
+
+print("Total number of  Patterns:", len(Patterns))
+
+obj.save(oFile)
+
+Df = obj.getPatternsAsDataFrame()
+
+memUSS = obj.getMemoryUSS()
+
+print("Total Memory in USS:", memUSS)
+
+memRSS = obj.getMemoryRSS()
+
+print("Total Memory in RSS", memRSS)
+
+run = obj.getRuntime()
+
+print("Total ExecutionTime in seconds:", run)
+
+
+
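expSup and expWSup are not defined on this page; under the standard expected-support model for uncertain databases (an assumption here), a pattern's expected support is the sum over transactions of the product of its items' existence probabilities. A minimal sketch:

def expectedSupport(pattern, database):
    # database: list of dicts mapping item -> existence probability.
    total = 0.0
    for transaction in database:
        if all(item in transaction for item in pattern):
            product = 1.0
            for item in pattern:
                product *= transaction[item]
            total += product
    return total

# expectedSupport(('a', 'b'), [{'a': 0.9, 'b': 0.5}, {'a': 0.4}]) == 0.45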
+
+getMemoryRSS() float[source]
+

Total amount of RSS memory consumed by the mining process will be retrieved from this function +:return: returning RSS memory consumed by the mining process +:rtype: float

+
+ +
+
+getMemoryUSS() float[source]
+

Total amount of USS memory consumed by the mining process will be retrieved from this function +:return: returning USS memory consumed by the mining process +:rtype: float

+
+ +
+
+getPatterns() dict[source]
+

Function to send the set of frequent patterns after completion of the mining process +:return: returning frequent patterns +:rtype: dict

+
+ +
+
+getPatternsAsDataFrame() DataFrame[source]
+

Storing final frequent patterns in a dataframe +:return: returning frequent patterns in a dataframe +:rtype: pd.DataFrame

+
+ +
+
+getRuntime() float[source]
+

Calculating the total amount of runtime taken by the mining process +:return: returning total amount of runtime taken by the mining process +:rtype: float

+
+ +
+
+mine() None[source]
+

mine() method where patterns are mined by constructing the tree and false patterns are removed by counting the original support of each pattern

+
+ +
+
+printResults() None[source]
+

This function is used to print the results +:return: None

+
+ +
+
+save(outFile: str) None[source]
+

Complete set of frequent patterns will be loaded into an output file

+
+
Parameters:
+

outFile (csv file) – Specify name of the output file

+
+
Returns:
+

None

+
+
+
+ +
+
+startMine() None[source]
+

startMine() method where patterns are mined by constructing the tree and false patterns are removed by counting the original support of each pattern.

+
+ +
+
+ +
+
+

PAMI.weightedUncertainFrequentPattern.basic.abstract module

+
+
+

Module contents

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/PAMI.weightedUncertainFrequentPattern.html b/sphinx/_build/html/PAMI.weightedUncertainFrequentPattern.html new file mode 100644 index 000000000..a982694f9 --- /dev/null +++ b/sphinx/_build/html/PAMI.weightedUncertainFrequentPattern.html @@ -0,0 +1,194 @@ + + + + + + + PAMI.weightedUncertainFrequentPattern package — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+ + +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/AssociationRules/basic/ARWithConfidence.html b/sphinx/_build/html/_modules/PAMI/AssociationRules/basic/ARWithConfidence.html new file mode 100644 index 000000000..0e9c3ead3 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/AssociationRules/basic/ARWithConfidence.html @@ -0,0 +1,553 @@ + + + + + + PAMI.AssociationRules.basic.ARWithConfidence — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.AssociationRules.basic.ARWithConfidence

+# This code uses the "confidence" metric to extract association rules from the given frequent patterns.
+#
+# **Importing this algorithm into a python program**
+# ----------------------------------------------------
+#
+#
+#             from PAMI.AssociationRules.basic import ARWithConfidence as alg
+#
+#             obj = alg.ARWithConfidence(iFile, minConf)
+#
+#             obj.mine()
+#
+#             associationRules = obj.getPatterns()
+#
+#             print("Total number of Association Rules:", len(associationRules))
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+     Copyright (C)  2021 Rage Uday Kiran
+     
+"""
+
+
+
+
+from PAMI.AssociationRules.basic import abstract as _ab
+from deprecated import deprecated
+
+
+
+class _Confidence:
+    """
+    :param  patterns: Dictionary containing patterns and their support values.
+    :type patterns: dict
+    :param  singleItems: List containing all the single frequent items.
+    :type singleItems: list
+    :param  minConf: Minimum confidence to mine all the satisfying association rules.
+    :type minConf: float
+    """
+
+    def __init__(self, patterns, singleItems, minConf):
+        """
+        :param patterns: given frequent patterns
+        :type patterns: dict
+        :param singleItems: one-length frequent patterns
+        :type singleItems: list
+        :param minConf: minimum confidence
+        :type minConf: float
+        """
+        self._frequentPatterns = patterns
+        self._singleItems = singleItems
+        self._minConf = minConf
+        self._finalPatterns = {}
+
+    def _generation(self, prefix, suffix):
+        """
+        To generate the combinations all association rules.
+
+        :param prefix: the prefix of association rule.
+        :type prefix: str
+        :param suffix: the suffix of association rule.
+        :type suffix: str
+        """
+        if len(suffix) == 1:
+            conf = self._generateWithConfidence(prefix, suffix[0])
+        for i in range(len(suffix)):
+            suffix1 = suffix[:i] + suffix[i + 1:]
+            prefix1 = prefix + ' ' + suffix[i]
+            for j in range(i + 1, len(suffix)):
+                self._generateWithConfidence(prefix + ' ' + suffix[i], suffix[j])
+                # self._generation(prefix+ ' ' +suffix[i], suffix[i+1:])
+            self._generation(prefix1, suffix1)
+
+    def _generateWithConfidence(self, lhs, rhs):
+        """
+        To find association rules satisfying user-specified minConf
+
+        :param lhs: the prefix of association rule.
+        :type lhs: str
+        :param rhs: the suffix of association rule.
+        :type rhs: str
+        """
+        s = lhs + '\t' + rhs
+        if self._frequentPatterns.get(s) is None:
+            return 0
+        minimum = self._frequentPatterns[s]
+        conf_lhs = minimum / self._frequentPatterns[lhs]
+        conf_rhs = minimum / self._frequentPatterns[rhs]
+        if conf_lhs >= self._minConf:
+            s1 = lhs + '->' + rhs
+            self._finalPatterns[s1] = conf_lhs
+        if conf_rhs >= self._minConf:
+            s1 = rhs + '->' + lhs
+            self._finalPatterns[s1] = conf_rhs
+
+    def run(self):
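+    # Worked example (illustrative): if sup(A B) = 4, sup(A) = 8 and sup(B) = 5,
+    # then conf(A -> B) = 4/8 = 0.5 and conf(B -> A) = 4/5 = 0.8; with
+    # minConf = 0.6 only the rule B -> A is kept in _finalPatterns.
+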
+        """
+        To generate the combinations all association rules.
+        """
+        for i in range(len(self._singleItems)):
+            suffix = self._singleItems[:i] + self._singleItems[i + 1:]
+            prefix = self._singleItems[i]
+            for j in range(i + 1, len(self._singleItems)):
+                self._generateWithConfidence(self._singleItems[i], self._singleItems[j])
+            self._generation(prefix, suffix)
+
+
+
+[docs] +class ARWithConfidence: + """ + About this algorithm + ==================== + + :Description: Association Rules are derived from frequent patterns using "confidence" metric. + + :Reference: + + :param iFile: str : + Name of the Input file to mine complete set of association rules + :param oFile: str : + Name of the output file to store complete set of association rules + :param minConf: float : + The user can specify the minConf in float between the range of 0 to 1. + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + + + :Attributes: + + startTime : float + To record the start time of the mining process + + endTime : float + To record the completion time of the mining process + + finalPatterns : dict + Storing the complete set of patterns in a dictionary variable + + memoryUSS : float + To store the total amount of USS memory consumed by the program + + memoryRSS : float + To store the total amount of RSS memory consumed by the program + + + Execution methods + ================= + + **Terminal command** + + .. code-block:: console + + Format: + + (.venv) $ python3 ARWithConfidence.py <inputFile> <outputFile> <minConf> <sep> + + Example Usage: + + (.venv) $ python3 ARWithConfidence.py sampleDB.txt patterns.txt 0.5 ' ' + + .. note:: minConf can be specified in a value between 0 and 1. + + + **Calling from a python program** + + .. code-block:: python + + import PAMI.AssociationRules.basic import ARWithConfidence as alg + + obj = alg.ARWithConfidence(iFile, minConf) + + obj.mine() + + associationRules = obj.getPatterns() + + print("Total number of Association Rules:", len(associationRules)) + + obj.save(oFile) + + Df = obj.getPatternInDataFrame() + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + + Credits + ======= + + The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran. + """ + + _minConf = float() + _startTime = float() + _endTime = float() + _iFile = " " + _oFile = " " + _Sep = " " + _memoryUSS = float() + _memoryRSS = float() + _frequentPatterns = {} + + def __init__(self, iFile, minConf, sep): + """ + :param iFile: input file name or path + :type iFile: str + :param minConf: minimum confidence + :type minConf: float + :param sep: Delimiter of input file + :type sep: str + """ + self._iFile = iFile + self._minConf = minConf + self._finalPatterns = {} + self._sep = sep + + def _readPatterns(self): + """ + Reading the input file and storing all the frequent patterns and their support respectively in a frequentPatterns variable. 
+ """ + self._frequentPatterns = {} + k = [] + if isinstance(self._iFile, _ab._pd.DataFrame): + pattern, sup = [], [] + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'pattern' in i: + pattern = self._iFile['pattern'].tolist() + if 'support' in i: + support = self._iFile['support'].tolist() + for i in range(len(pattern)): + s = '\t'.join(pattern[i]) + self._frequentPattern[s] = support[i] + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line = line.strip() + line = line.split(':') + s = line[0].split(self._sep) + s = '\t'.join(s) + self._frequentPatterns[s.strip()] = int(line[1]) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line = line.strip() + line = line.split(':') + s = line[0].split(self._sep) + for j in s: + if j not in k: + k.append(j) + s = '\t'.join(s) + self._frequentPatterns[s.strip()] = int(line[1]) + except IOError: + print("File Not Found") + quit() + return k + +
+[docs] + @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.") + def startMine(self): + """ + Association rule mining process will start from here + """ + self.mine()
+ + + + +
+[docs] + def mine(self): + """ + Association rule mining process will start from here + """ + self._startTime = _ab._time.time() + k = self._readPatterns() + a = _Confidence(self._frequentPatterns, k, self._minConf) + a.run() + self._finalPatterns = a._finalPatterns + self._endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = float() + self._memoryRSS = float() + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss + print("Association rules successfully generated from frequent patterns ")
+ + +
+[docs] + def getMemoryUSS(self): + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self): + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self): + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self): + """ + Storing final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataFrame = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a.replace('\t', ' '), b]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support']) + # dataFrame = dataFrame.replace(r'\r+|\n+|\t+',' ', regex=True) + return dataFrame
+ + +
+[docs] + def save(self, outFile): + """ + Complete set of frequent patterns will be loaded in to an output file + + :param outFile: name of the outputfile + :type outFile: file + """ + self._oFile = outFile + writer = open(self._oFile, 'w+') + for x, y in self._finalPatterns.items(): + s1 = x.strip() + ":" + str(y) + writer.write("%s \n" % s1)
+ + +
+[docs] + def getPatterns(self): + """ + Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs] + def printResults(self): + """ + Function to send the result after completion of the mining process + """ + print("Total number of Association Rules:", len(self.getPatterns())) + print("Total Memory in USS:", self.getMemoryUSS()) + print("Total Memory in RSS", self.getMemoryRSS()) + print("Total ExecutionTime in ms:", self.getRuntime())
+
+ + + +if __name__ == "__main__": + _ap = str() + if len(_ab._sys.argv) == 4 or len(_ab._sys.argv) == 5: + if len(_ab._sys.argv) == 5: + _ap = ARWithConfidence(_ab._sys.argv[1], float(_ab._sys.argv[3]), _ab._sys.argv[4]) + if len(_ab._sys.argv) == 4: + _ap = ARWithConfidence(_ab._sys.argv[1], _ab._sys.argv[3]) + _ap.startMine() + _ap.mine() + print("Total number of Association Rules:", len(_ap.getPatterns())) + _ap.save(_ab._sys.argv[2]) + print("Total Memory in USS:", _ap.getMemoryUSS()) + print("Total Memory in RSS", _ap.getMemoryRSS()) + print("Total ExecutionTime in ms:", _ap.getRuntime()) + else: + print("Error! The number of input parameters do not match the total number of parameters provided") +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/AssociationRules/basic/ARWithLeverage.html b/sphinx/_build/html/_modules/PAMI/AssociationRules/basic/ARWithLeverage.html new file mode 100644 index 000000000..33e6ae03b --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/AssociationRules/basic/ARWithLeverage.html @@ -0,0 +1,549 @@ + + + + + + PAMI.AssociationRules.basic.ARWithLeverage — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for PAMI.AssociationRules.basic.ARWithLeverage

+# This code uses "leverage" metric to extract the association rules from given frequent patterns.
+#
+# **Importing this algorithm into a python program**
+# ----------------------------------------------------
+#
+#
+#             from PAMI.AssociationRules.basic import ARWithLeverage as alg
+#
+#             obj = alg.ARWithLeverage(iFile, minConf)
+#
+#             obj.mine()
+#
+#             associationRules = obj.getPatterns()
+#
+#             print("Total number of Association Rules:", len(associationRules))
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+
+from PAMI.AssociationRules.basic import abstract as _ab
+from typing import List, Dict, Tuple, Set, Union, Any, Generator
+from deprecated import deprecated
+
+
+class _Leverage:
+
+    """
+    :param patterns: Dictionary containing patterns and their support values.
+    :type patterns: dict
+    :param singleItems: List containing all the single frequent items.
+    :type singleItems: list
+    :param minConf: Minimum confidence to mine all the satisfying association rules.
+    :type minConf: float
+    """
+
+    def __init__(self, patterns, singleItems, minConf) -> None:
+        """
+        :param patterns: given frequent patterns
+        :type patterns: dict
+        :param singleItems: one-length frequent patterns
+        :type singleItems: list
+        :param minConf: minimum confidence
+        :type minConf: float
+        :return: None
+        """
+        self._frequentPatterns = patterns
+        self._singleItems = singleItems
+        self._minConf = minConf
+        self._finalPatterns = {}
+
+    def _generation(self, prefix, suffix) -> None:
+        """
+        To generate all combinations of association rules.
+
+        :param prefix: the prefix of association rule.
+        :type prefix: str
+        :param suffix: the suffix of association rule.
+        :type suffix: str
+        """
+
+        if len(suffix) == 1:
+            self._generateWithLeverage(prefix, suffix[0])
+        for i in range(len(suffix)):
+            suffix1 = suffix[:i] + suffix[i + 1:]
+            prefix1 = prefix + ' ' + suffix[i]
+            for j in range(i + 1, len(suffix)):
+                self._generateWithLeverage(prefix + ' ' + suffix[i], suffix[j])
+            self._generation(prefix1, suffix1)
+
+    def _generateWithLeverage(self, lhs, rhs) -> float:
+        """
+        To find association rules satisfying user-specified minConf
+
+        :param lhs: the prefix of association rule.
+        :type lhs: str
+        :param rhs: the suffix of association rule.
+        :type rhs: str
+        :return: 0 if the combined pattern is absent from the input; otherwise None (qualifying rules are stored in _finalPatterns)
+        :rtype: int or None
+        """
+        s = lhs + '\t' + rhs
+        if self._frequentPatterns.get(s) is None:
+            return 0
+        minimum = self._frequentPatterns[s]
+        conf_lhs = minimum / self._frequentPatterns[lhs]
+        conf_rhs = minimum / self._frequentPatterns[rhs]
+        leverage_lhs = conf_lhs - self._frequentPatterns[rhs] * self._frequentPatterns[lhs]
+        leverage_rhs = conf_rhs - self._frequentPatterns[lhs] * self._frequentPatterns[rhs]
+        if leverage_lhs >= self._minConf:
+            s1 = lhs + '->' + rhs
+            self._finalPatterns[s1] = conf_lhs
+        if leverage_rhs >= self._minConf:
+            s1 = rhs + '->' + lhs
+            self._finalPatterns[s1] = conf_rhs
+
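+    # A hedged worked example (added note, not part of the original source),
+    # using illustrative normalized supports sup(A)=0.4, sup(B)=0.3 and
+    # sup(A\tB)=0.2:
+    #     conf(A->B)     = 0.2 / 0.4 = 0.5
+    #     leverage score = conf(A->B) - sup(B) * sup(A) = 0.5 - 0.12 = 0.38
+    # The rule A->B is recorded only when this score meets self._minConf.
+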
+    def run(self) -> None:
+        """
+        To generate all combinations of association rules.
+        """
+        for i in range(len(self._singleItems)):
+            suffix = self._singleItems[:i] + self._singleItems[i + 1:]
+            prefix = self._singleItems[i]
+            for j in range(i + 1, len(self._singleItems)):
+                self._generateWithLeverage(self._singleItems[i], self._singleItems[j])
+            self._generation(prefix, suffix)
+
+
+
+[docs] +class ARWithLeverage: + """ + About this algorithm + ==================== + + :Description: Association Rules are derived from frequent patterns using "leverage" metric. + + :Reference: + + :param iFile: str : + Name of the Input file to mine complete set of association rules + :param oFile: str : + Name of the output file to store complete set of association rules + :param minConf: float : + The user can specify the minConf in float between the range of 0 to 1. + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + + :Attributes: + + + startTime : float + To record the start time of the mining process + + endTime : float + To record the completion time of the mining process + + finalPatterns : dict + Storing the complete set of patterns in a dictionary variable + + memoryUSS : float + To store the total amount of USS memory consumed by the program + + memoryRSS : float + To store the total amount of RSS memory consumed by the program + + + Execution methods + ================= + + **Terminal command** + + .. code-block:: console + + Format: + + (.venv) $ python3 ARWithLeverage.py <inputFile> <outputFile> <minConf> <sep> + + Example Usage: + + (.venv) $ python3 ARWithLeverage.py sampleDB.txt patterns.txt 10.0 ' ' + + .. note:: minConf can be specified in a value between 0 and 1. + + + **Calling from a python program** + + .. code-block:: python + + import PAMI.AssociationRules.basic import ARWithLeverage as alg + + obj = alg.ARWithLeverage(iFile, minConf) + + obj.mine() + + associationRules = obj.getPatterns() + + print("Total number of Association Rules:", len(associationRules)) + + obj.save(oFile) + + Df = obj.getPatternInDataFrame() + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + Credits + ======= + + The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran. + + """ + + def __init__(self, iFile, minConf, sep) -> None: + """ + :param iFile: input file name or path + :type iFile: str + :param minConf: The user can specify the minConf in float between the range of 0 to 1. + :type minConf: float + :param sep: This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. 
+ :type sep: str + :return: None + """ + self._iFile = iFile + self._minConf = minConf + self._finalPatterns = {} + self._sep = sep + + def _readPatterns(self) -> list: + """ + To read patterns of leverage + + :return: List of patterns + :rtype: list + """ + self._frequentPatterns = {} + k = [] + if isinstance(self._iFile, _ab._pd.DataFrame): + pattern, sup = [], [] + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'pattern' in i: + pattern = self._iFile['pattern'].tolist() + if 'support' in i: + support = self._iFile['support'].tolist() + for i in range(len(pattern)): + s = '\t'.join(pattern[i]) + self._frequentPattern[s] = support[i] + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line = line.strip() + line = line.split(':') + s = line[0].split(self._sep) + s = '\t'.join(s) + self._frequentPatterns[s.strip()] = int(line[1]) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line = line.strip() + line = line.split(':') + s = line[0].split(self._sep) + for j in s: + if j not in k: + k.append(j) + s = '\t'.join(s) + self._frequentPatterns[s.strip()] = int(line[1]) + except IOError: + print("File Not Found") + quit() + return k + +
+[docs] + @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.") + def startMine(self) -> None: + """ + Association rule mining process will start from here + """ + self.mine()
+ + +
+[docs] + def mine(self) -> None: + """ + Association rule mining process will start from here + """ + self._startTime = _ab._time.time() + k = self._readPatterns() + a = _Leverage(self._frequentPatterns, k, self._minConf) + a.run() + self._finalPatterns = a._finalPatterns + self._endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = float() + self._memoryRSS = float() + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss + print("Association rules successfully generated from frequent patterns ")
+ + +
+[docs] + def getMemoryUSS(self) -> float: + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self) -> float: + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self) -> float: + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self) -> _ab._pd.DataFrame: + """ + Storing final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataFrame = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a.replace('\t', ' '), b]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support']) + # dataFrame = dataFrame.replace(r'\r+|\n+|\t+',' ', regex=True) + return dataFrame
+ + +
+[docs] + def save(self, outFile) -> None: + """ + Complete set of frequent patterns will be loaded in to an output file + + :param outFile: name of the outputfile + :type outFile: file + :return: None + """ + self._oFile = outFile + writer = open(self._oFile, 'w+') + for x, y in self._finalPatterns.items(): + s1 = x.strip() + ":" + str(y) + writer.write("%s \n" % s1)
+ + +
+[docs] + def getPatterns(self) -> dict: + """ + Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs] + def printResults(self) -> None: + """ + Function to send the result after completion of the mining process + """ + print("Total number of Association Rules:", len(self.getPatterns())) + print("Total Memory in USS:", self.getMemoryUSS()) + print("Total Memory in RSS", self.getMemoryRSS()) + print("Total ExecutionTime in ms:", self.getRuntime())
+
+ + + +if __name__ == "__main__": + _ap = str() + if len(_ab._sys.argv) == 4 or len(_ab._sys.argv) == 5: + if len(_ab._sys.argv) == 5: + _ap = ARWithLeverage(_ab._sys.argv[1], float(_ab._sys.argv[3]), _ab._sys.argv[4]) + if len(_ab._sys.argv) == 4: + _ap = ARWithLeverage(_ab._sys.argv[1], _ab._sys.argv[3]) + _ap.startMine() + _ap.mine() + print("Total number of Association Rules:", len(_ap.getPatterns())) + _ap.save(_ab._sys.argv[2]) + print("Total Memory in USS:", _ap.getMemoryUSS()) + print("Total Memory in RSS", _ap.getMemoryRSS()) + print("Total ExecutionTime in ms:", _ap.getRuntime()) + else: + print("Error! The number of input parameters do not match the total number of parameters provided") +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/AssociationRules/basic/ARWithLift.html b/sphinx/_build/html/_modules/PAMI/AssociationRules/basic/ARWithLift.html new file mode 100644 index 000000000..22abab83b --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/AssociationRules/basic/ARWithLift.html @@ -0,0 +1,554 @@ + + + + + + PAMI.AssociationRules.basic.ARWithLift — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for PAMI.AssociationRules.basic.ARWithLift

+# This code uses "lift" metric to extract the association rules from given frequent patterns.
+#
+#
+# **Importing this algorithm into a python program**
+# ----------------------------------------------------
+#
+#             from PAMI.AssociationRules.basic import ARWithLift as alg
+#
+#             obj = alg.ARWithLift(iFile, minConf)
+#
+#             obj.mine()
+#
+#             associationRules = obj.getPatterns()
+#
+#             print("Total number of Association Rules:", len(associationRules))
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+
+
+from PAMI.AssociationRules.basic import abstract as _ab
+from typing import List, Dict, Tuple, Set, Union, Any, Generator
+from deprecated import deprecated
+
+
+[docs] +class Lift: + + """ + :param patterns: Dictionary containing patterns and its support value. + :type patterns: dict + :param singleItems: List containing all the single frequent items. + :type singleItems: list + :param minConf: Minimum confidence to mine all the satisfying association rules. + :type minConf: int + """ + + def __init__(self, patterns, singleItems, minConf) -> None: + """ + :param patterns: given frequent patterns + :type patterns: dict + :param singleItems: one-length frequent patterns + :type singleItems: list + :param minConf: minimum confidence + :type minConf: float + :return: None + """ + self._frequentPatterns = patterns + self._singleItems = singleItems + self._minConf = minConf + self._finalPatterns = {} + + def _generation(self, prefix, suffix) -> None: + """ + To generate the combinations all association rules. + + :param prefix: the prefix of association rule. + :type prefix: str + :param suffix: the suffix of association rule. + :type suffix: str + :return: None + """ + if len(suffix) == 1: + self._generateWithLift(prefix, suffix[0]) + for i in range(len(suffix)): + suffix1 = suffix[:i] + suffix[i + 1:] + prefix1 = prefix + ' ' + suffix[i] + for j in range(i + 1, len(suffix)): + self._generateWithLift(prefix + ' ' + suffix[i], suffix[j]) + # self._generation(prefix+ ' ' +suffix[i], suffix[i+1:]) + self._generation(prefix1, suffix1) + + def _generateWithLift(self, lhs, rhs) -> float: + """ + To find association rules satisfying user-specified minConf + + :param lhs: the prefix of association rule. + :type lhs: str + :param rhs: the suffix of association rule. + :type rhs: str + :return: the association rule + :rtype: float + """ + s = lhs + '\t' + rhs + if self._frequentPatterns.get(s) == None: + return 0 + minimum = self._frequentPatterns[s] + conf_lhs = minimum / self._frequentPatterns[lhs] + conf_rhs = minimum / self._frequentPatterns[rhs] + lift_lhs = conf_lhs / self._frequentPatterns[rhs] * self._frequentPatterns[lhs] + right_rhs = conf_rhs / self._frequentPatterns[lhs] * self._frequentPatterns[rhs] + if lift_lhs >= self._minConf: + s1 = lhs + '->' + rhs + self._finalPatterns[s1] = conf_lhs + if right_rhs >= self._minConf: + s1 = rhs + '->' + lhs + self._finalPatterns[s1] = conf_rhs + +
+[docs] + def run(self) -> None: + """ + To generate the combinations all association rules. + """ + for i in range(len(self._singleItems)): + suffix = self._singleItems[:i] + self._singleItems[i + 1:] + prefix = self._singleItems[i] + for j in range(i + 1, len(self._singleItems)): + self._generateWithLift(self._singleItems[i], self._singleItems[j]) + self._generation(prefix, suffix)
+
+ + + +
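+# A hedged worked example for the Lift helper above (added note, not part of
+# the original source), using illustrative normalized supports sup(A)=0.4,
+# sup(B)=0.3 and sup(A\tB)=0.2:
+#     conf(A->B)  = 0.2 / 0.4 = 0.5
+#     lift score  = conf(A->B) / sup(B) * sup(A) = 0.5 / 0.3 * 0.4 ~= 0.67
+# The rule A->B is recorded only when this score meets the minConf threshold.
+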
+[docs] +class ARWithLift: + """ + About this algorithm + ==================== + + :Description: Association Rules are derived from frequent patterns using "lift" metric. + + :Reference: + + :param iFile: str : + Name of the Input file to mine complete set of association rules + :param oFile: str : + Name of the output file to store complete set of association rules + :param minConf: float : + The user can specify the minConf in float between the range of 0 to 1. + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + + :Attributes: + + startTime : float + To record the start time of the mining process + + endTime : float + To record the completion time of the mining process + + finalPatterns : dict + Storing the complete set of patterns in a dictionary variable + + memoryUSS : float + To store the total amount of USS memory consumed by the program + + memoryRSS : float + To store the total amount of RSS memory consumed by the program + + + Execution methods + ================= + + **Terminal command** + + .. code-block:: console + + Format: + + (.venv) $ python3 ARWithLift.py <inputFile> <outputFile> <minConf> <sep> + + Example Usage: + + (.venv) $ python3 ARWithLift.py sampleDB.txt patterns.txt 0.5 ' ' + + .. note:: minConf can be specified in a value between 0 and 1. + + + **Calling from a python program** + + .. code-block:: python + + import PAMI.AssociationRules.basic import ARWithLift as alg + + obj = alg.ARWithLift(iFile, minConf) + + obj.mine() + + associationRules = obj.getPatterns() + + print("Total number of Association Rules:", len(associationRules)) + + obj.save(oFile) + + Df = obj.getPatternInDataFrame() + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + Credits + ======= + + The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran. + + """ + + def __init__(self, iFile, minConf, sep) -> None: + """ + :param iFile: input file name or path + :type iFile: str + :param minConf: minimum confidence + :type minConf: float + :param sep: Delimiter of input file + :type sep: str + :return: None + """ + self._iFile = iFile + self._minConf = minConf + self._finalPatterns = {} + self._sep = sep + + def _readPatterns(self) -> list: + """ + Reading the input file and storing all the frequent patterns and their support respectively in a frequentPatterns variable. 
+ + :return: list of frequent patterns and their support respectively in a frequentPatterns + :rtype: list + """ + self._frequentPatterns = {} + k = [] + if isinstance(self._iFile, _ab._pd.DataFrame): + pattern, sup = [], [] + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'pattern' in i: + pattern = self._iFile['pattern'].tolist() + if 'support' in i: + support = self._iFile['support'].tolist() + for i in range(len(pattern)): + s = '\t'.join(pattern[i]) + self._frequentPattern[s] = support[i] + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line = line.strip() + line = line.split(':') + s = line[0].split(self._sep) + s = '\t'.join(s) + self._frequentPatterns[s.strip()] = int(line[1]) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line = line.strip() + line = line.split(':') + s = line[0].split(self._sep) + for j in s: + if j not in k: + k.append(j) + s = '\t'.join(s) + self._frequentPatterns[s.strip()] = int(line[1]) + except IOError: + print("File Not Found") + quit() + return k + +
+[docs] + @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.") + def startMine(self) -> None: + """ + Association rule mining process will start from here + """ + self.mine()
+ + +
+[docs] + def mine(self) -> None: + """ + Association rule mining process will start from here + """ + self._startTime = _ab._time.time() + k = self._readPatterns() + a = Lift(self._frequentPatterns, k, self._minConf) + a.run() + self._finalPatterns = a._finalPatterns + self._endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = float() + self._memoryRSS = float() + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss + print("Association rules successfully generated from frequent patterns ")
+ + +
+[docs] + def getMemoryUSS(self) -> float: + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self) -> float: + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self) -> float: + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self) -> _ab._pd.DataFrame: + """ + Storing final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataFrame = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a.replace('\t', ' '), b]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support']) + # dataFrame = dataFrame.replace(r'\r+|\n+|\t+',' ', regex=True) + return dataFrame
+ + +
+[docs] + def save(self, outFile) -> None: + """ + Complete set of frequent patterns will be loaded in to an output file + + :param outFile: name of the outputfile + :type outFile: file + :return: None + """ + self._oFile = outFile + writer = open(self._oFile, 'w+') + for x, y in self._finalPatterns.items(): + s1 = x.strip() + ":" + str(y) + writer.write("%s \n" % s1)
+ + +
+[docs] + def getPatterns(self) -> dict: + """ + Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs] + def printResults(self) -> None: + """ + Function to send the result after completion of the mining process + """ + print("Total number of Association Rules:", len(self.getPatterns())) + print("Total Memory in USS:", self.getMemoryUSS()) + print("Total Memory in RSS", self.getMemoryRSS()) + print("Total ExecutionTime in ms:", self.getRuntime())
+
+ + + +if __name__ == "__main__": + _ap = str() + if len(_ab._sys.argv) == 4 or len(_ab._sys.argv) == 5: + if len(_ab._sys.argv) == 5: + _ap = ARWithLift(_ab._sys.argv[1], float(_ab._sys.argv[3]), _ab._sys.argv[4]) + if len(_ab._sys.argv) == 4: + _ap = ARWithLift(_ab._sys.argv[1], _ab._sys.argv[3]) + _ap.startMine() + _ap.mine() + print("Total number of Association Rules:", len(_ap.getPatterns())) + _ap.save(_ab._sys.argv[2]) + print("Total Memory in USS:", _ap.getMemoryUSS()) + print("Total Memory in RSS", _ap.getMemoryRSS()) + print("Total ExecutionTime in ms:", _ap.getRuntime()) + else: + print("Error! The number of input parameters do not match the total number of parameters provided") +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/AssociationRules/basic/RuleMiner.html b/sphinx/_build/html/_modules/PAMI/AssociationRules/basic/RuleMiner.html new file mode 100644 index 000000000..a373319ce --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/AssociationRules/basic/RuleMiner.html @@ -0,0 +1,708 @@ + + + + + + PAMI.AssociationRules.basic.RuleMiner — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for PAMI.AssociationRules.basic.RuleMiner

+# RuleMiner code is used to extract the association rules from given frequent patterns.
+#
+# **Importing this algorithm into a python program**
+# ----------------------------------------------------
+#
+#             from PAMI.AssociationRules.basic import RuleMiner as alg
+#
+#             obj = alg.RuleMiner(iFile, measure, 0.5, "\t")
+#
+#             obj.mine()
+#
+#             associationRules = obj.getPatterns()
+#
+#             print("Total number of Association Rules:", len(associationRules))
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+from PAMI.AssociationRules.basic import abstract as _ab
+from deprecated import deprecated
+
+
+[docs] +class Confidence: + """ + Association Rules are derived from frequent patterns using "confidence" metric. + """ + + def __init__(self, patterns, singleItems, threshold): + self._frequentPatterns = patterns + self._singleItems = singleItems + self._threshold = threshold + self._finalPatterns = {} + + def _generation(self, prefix, suffix): + """ + To generate the combinations all association rules. + + :param prefix: the prefix of association rule. + :type prefix: str + :param suffix: the suffix of association rule. + :type suffix: str + """ + if len(suffix) == 1: + conf = self._generaeWithConfidence(prefix, suffix[0]) + for i in range(len(suffix)): + suffix1 = suffix[:i] + suffix[i+1:] + prefix1 = prefix + ' ' + suffix[i] + for j in range(i+1, len(suffix)): + self._generaeWithConfidence(prefix + ' ' + suffix[i], suffix[j]) + #self._generation(prefix+ ' ' +suffix[i], suffix[i+1:]) + self._generation(prefix1, suffix1) + + def _generaeWithConfidence(self, lhs, rhs): + """ + To find association rules satisfying user-specified minConf + + :param lhs: the prefix of association rule. + :type lhs: str + :param rhs: the suffix of association rule. + :type rhs: str + """ + s = lhs + '\t' + rhs + if self._frequentPatterns.get(s) == None: + return 0 + minimum = self._frequentPatterns[s] + conflhs = minimum / self._frequentPatterns[lhs] + confrhs = minimum / self._frequentPatterns[rhs] + if conflhs >= self._threshold: + s1 = lhs + '->' + rhs + self._finalPatterns[s1] = conflhs + if confrhs >= self._threshold: + s1 = rhs + '->' + lhs + self._finalPatterns[s1] = confrhs + +
+[docs] + def run(self): + """ + To generate the combinations all association rules. + """ + for i in range(len(self._singleItems)): + suffix = self._singleItems[:i] + self._singleItems[i+1:] + prefix = self._singleItems[i] + for j in range(i+1, len(self._singleItems)): + self._generaeWithConfidence(self._singleItems[i], self._singleItems[j]) + self._generation(prefix, suffix)
+
+ + + + +
+[docs] +class Lift: + """ + Association Rules are derived from frequent patterns using "lift" metric. + """ + + def __init__(self, patterns, singleItems, threshold): + """ + :param patterns: given frequent patterns + :type patterns: dict + :param singleItems: one-length frequent patterns + :type singleItems: list + :param threshold: threshold for lifting rules + :type threshold: float + """ + self._frequentPatterns = patterns + self._singleItems = singleItems + self._threshold = threshold + self._finalPatterns = {} + + def _generation(self, prefix, suffix): + """ + To generate the combinations all association rules. + + :param prefix: the prefix of association rule. + :type prefix: str + :param suffix: the suffix of association rule. + :type suffix: str + """ + if len(suffix) == 1: + self._generateWithLift(prefix, suffix[0]) + for i in range(len(suffix)): + suffix1 = suffix[:i] + suffix[i+1:] + prefix1 = prefix + ' ' + suffix[i] + for j in range(i+1, len(suffix)): + self._generateWithLift(prefix + ' ' + suffix[i], suffix[j]) + #self._generation(prefix+ ' ' +suffix[i], suffix[i+1:]) + self._generation(prefix1, suffix1) + + def _generateWithLift(self, lhs, rhs): + """ + To find association rules satisfying user-specified minConf + + :param lhs: the prefix of association rule. + :type lhs: str + :param rhs: the suffix of association rule. + :type rhs: str + """ + s = lhs + '\t' + rhs + if self._frequentPatterns.get(s) == None: + return 0 + minimum = self._frequentPatterns[s] + conflhs = minimum / self._frequentPatterns[lhs] + confrhs = minimum / self._frequentPatterns[rhs] + liftlhs = conflhs / self._frequentPatterns[rhs] * self._frequentPatterns[lhs] + rightrhs = confrhs / self._frequentPatterns[lhs] * self._frequentPatterns[rhs] + if liftlhs >= self._threshold: + s1 = lhs + '->' + rhs + self._finalPatterns[s1] = conflhs + if rightrhs >= self._threshold: + s1 = rhs + '->' + lhs + self._finalPatterns[s1] = confrhs + +
+[docs] + def run(self): + """ + To generate the combinations all association rules. + """ + for i in range(len(self._singleItems)): + suffix = self._singleItems[:i] + self._singleItems[i+1:] + prefix = self._singleItems[i] + for j in range(i+1, len(self._singleItems)): + self._generateWithLift(self._singleItems[i], self._singleItems[j]) + self._generation(prefix, suffix)
+
+ + + + +
+[docs] +class Leverage: + """ + Association Rules are derived from frequent patterns using "leverage" metric. + """ + + def __init__(self, patterns, singleItems, threshold): + """ + :param patterns: given frequent patterns + :type patterns: dict + :param singleItems: one-length frequent patterns + :type singleItems: list + :param threshold: threshold for lifting rules + :type threshold: float + """ + self._frequentPatterns = patterns + self._singleItems = singleItems + self._threshold = threshold + self._finalPatterns = {} + + def _generation(self, prefix, suffix): + """ + To generate the combinations all association rules. + + :param prefix: the prefix of association rule. + :type prefix: str + :param suffix: the suffix of association rule. + :type suffix: str + """ + if len(suffix) == 1: + conf = self._generateWithLeverage(prefix, suffix[0]) + for i in range(len(suffix)): + suffix1 = suffix[:i] + suffix[i+1:] + prefix1 = prefix + ' ' + suffix[i] + for j in range(i+1, len(suffix)): + self._generateWithLeverage(prefix + ' ' + suffix[i], suffix[j]) + self._generation(prefix1, suffix1) + + def _generateWithLeverage(self, lhs, rhs): + """ + To find association rules satisfying user-specified minConf + + :param lhs: the prefix of association rule. + :type lhs: str + :param rhs: the suffix of association rule. + :type rhs: str + """ + s = lhs + '\t' + rhs + if self._frequentPatterns.get(s) == None: + return 0 + minimum = self._frequentPatterns[s] + conflhs = minimum / self._frequentPatterns[lhs] + confrhs = minimum / self._frequentPatterns[rhs] + liftlhs = conflhs - self._frequentPatterns[rhs] * self._frequentPatterns[lhs] + rightrhs = confrhs - self._frequentPatterns[lhs] * self._frequentPatterns[rhs] + if liftlhs >= self._threshold: + s1 = lhs + '->' + rhs + self._finalPatterns[s1] = conflhs + if rightrhs >= self._threshold: + s1 = rhs + '->' + lhs + self._finalPatterns[s1] = confrhs + +
+[docs] + def run(self): + """ + To generate the combinations all association rules. + """ + for i in range(len(self._singleItems)): + suffix = self._singleItems[:i] + self._singleItems[i+1:] + prefix = self._singleItems[i] + for j in range(i+1, len(self._singleItems)): + conf = self._generateWithLeverage(self._singleItems[i], self._singleItems[j]) + self._generation(prefix, suffix)
+
+ + +
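+# Descriptive summary (added note, not part of the original source): for a
+# rule lhs -> rhs, with sup(.) read from the frequent-pattern input, the
+# three helper classes above score rules as
+#     Confidence : sup(lhs, rhs) / sup(lhs)
+#     Lift       : conf(lhs->rhs) / sup(rhs) * sup(lhs)
+#     Leverage   : conf(lhs->rhs) - sup(rhs) * sup(lhs)
+# RuleMiner below dispatches on its 'measure' argument and keeps a rule when
+# the chosen score reaches the user-given threshold.
+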
+[docs] +class RuleMiner: + """ + About this algorithm + ==================== + + :Description: RuleMiner code is used to extract the association rules from given frequent patterns + + :Reference: + + + :param iFile: str : + Name of the Input file to mine complete set of association rules + :param oFile: str : + Name of the output file to store complete set of association rules + :param minConf: float : + The user can specify the minConf in float between the range of 0 to 1. + :param frequentPattern: list or dict : + frequent patterns are stored in the form of list or dictionary + :param measure: str : + condition to calculate the strength of rule + :param threshold: int : + condition to satisfy + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + :Attributes: + + + startTime : float + To record the start time of the mining process + + endTime : float + To record the completion time of the mining process + + finalPatterns : dict + Storing the complete set of patterns in a dictionary variable + + memoryUSS : float + To store the total amount of USS memory consumed by the program + + memoryRSS : float + To store the total amount of RSS memory consumed by the program + + + Execution methods + ================= + + **Terminal command** + + .. code-block:: console + + Format: + + (.venv) $ python3 RuleMiner.py <inputFile> <outputFile> <minConf> <sep> + + Example Usage: + + (.venv) $ python3 RuleMiner.py sampleDB.txt patterns.txt 0.5 ' ' + + .. note:: minConf can be specified in a value between 0 and 1. + + + **Calling from a python program** + + .. code-block:: python + + import PAMI.AssociationRules.basic import RuleMiner as alg + + obj = alg.RuleMiner(iFile, measure, o.5, "\t") + + obj.mine() + + associationRules = obj.getPatterns() + + print("Total number of Association Rules:", len(associationRules)) + + obj.save(oFile) + + Df = obj.getPatternInDataFrame() + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + :Methods: + + mine() + """ + + def __init__(self, iFile, measure, threshold, sep): + """ + :param iFile: input file name or path + :type iFile: str + :param measure: measure + :type measure: str + :param threshold: threshold for lifting rules + :type threshold: float + :param sep: Delimiter of input file + :type sep: str + """ + self._iFile = iFile + self._measure = measure + self._threshold = threshold + self._finalPatterns = {} + self._sep = sep + + def _readPatterns(self): + """ + Reading the input file and storing all the frequent patterns and their support respectively in a frequentPatterns variable. 
+ """ + self._frequentPatterns = {} + k = [] + if isinstance(self._iFile, _ab._pd.DataFrame): + pattern, sup = [], [] + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'pattern' in i: + pattern = self._iFile['pattern'].tolist() + if 'support' in i: + support = self._iFile['support'].tolist() + for i in range(len(pattern)): + s = '\t'.join(pattern[i]) + self._frequentPattern[s] = support[i] + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line = line.strip() + line = line.split(':') + s = line[0].split(self._sep) + s = '\t'.join(s) + self._frequentPatterns[s.strip()] = int(line[1]) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line = line.strip() + line = line.split(':') + s = line[0].split(self._sep) + for j in s: + if j not in k: + k.append(j) + s = '\t'.join(s) + self._frequentPatterns[s.strip()] = int(line[1]) + except IOError: + print("File Not Found") + quit() + return k + +
+[docs] + @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.") + def startMine(self): + """ + Association rule mining process will start from here + """ + self.mine()
+ + + +
+[docs] + def mine(self): + """ + Association rule mining process will start from here + """ + self._startTime = _ab._time.time() + k = self._readPatterns() + if self._measure == 'confidence': + a = Confidence(self._frequentPatterns, k, self._threshold) + a.run() + self._finalPatterns = a._finalPatterns + if self._measure == 'lift': + a = Lift(self._frequentPatterns, k, self._threshold) + a.run() + self._finalPatterns = a._finalPatterns + if self._measure == 'leverage': + a = Leverage(self._frequentPatterns, k, self._threshold) + a.run() + self._finalPatterns = a._finalPatterns + self._endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = float() + self._memoryRSS = float() + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss + print("Association rules successfully generated from frequent patterns ")
+ + +
+[docs] + def getMemoryUSS(self): + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self): + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self): + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self): + """ + Storing final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataFrame = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a.replace('\t', ' '), b]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support']) + # dataFrame = dataFrame.replace(r'\r+|\n+|\t+',' ', regex=True) + return dataFrame
+ + +
+[docs] + def save(self, outFile): + """ + Complete set of frequent patterns will be loaded in to a output file + + :param outFile: name of the output file + :type outFile: csv file + """ + self._oFile = outFile + writer = open(self._oFile, 'w+') + for x, y in self._finalPatterns.items(): + s1 = x.strip() + ":" + str(y) + writer.write("%s \n" % s1)
+ + +
+[docs] + def getPatterns(self): + """ + Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs] + def printResults(self): + """ + Function to send the result after completion of the mining process + """ + print("Total number of Association Rules:", len(self.getPatterns())) + print("Total Memory in USS:", self.getMemoryUSS()) + print("Total Memory in RSS", self.getMemoryRSS()) + print("Total ExecutionTime in ms:", self.getRuntime())
+
+ + + +if __name__ == "__main__": + _ap = str() + if len(_ab._sys.argv) == 5 or len(_ab._sys.argv) == 6: + if len(_ab._sys.argv) == 6: + _ap = RuleMiner(_ab._sys.argv[1], _ab._sys.argv[3], float(_ab._sys.argv[4]), _ab._sys.argv[5]) + if len(_ab._sys.argv) == 5: + _ap = RuleMiner(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4]) + _ap.startMine() + _ap.mine() + print("Total number of Association Rules:", len(_ap.getPatterns())) + _ap.save(_ab._sys.argv[2]) + print("Total Memory in USS:", _ap.getMemoryUSS()) + print("Total Memory in RSS", _ap.getMemoryRSS()) + print("Total ExecutionTime in ms:", _ap.getRuntime()) + else: + _ap = RuleMiner('sensorOutput.txt', "lift", 0.5, '\t') + _ap.startMine() + _ap.mine() + _ap.save('output.txt') + _ap.printResults() + print("Error! The number of input parameters do not match the total number of parameters provided") +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/correlatedPattern/basic/CoMine.html b/sphinx/_build/html/_modules/PAMI/correlatedPattern/basic/CoMine.html new file mode 100644 index 000000000..edc9537f2 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/correlatedPattern/basic/CoMine.html @@ -0,0 +1,852 @@ + + + + + + PAMI.correlatedPattern.basic.CoMine — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for PAMI.correlatedPattern.basic.CoMine

+# CoMine is one of the fundamental algorithms for discovering correlated patterns in a transactional database.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#
+#             from PAMI.correlatedPattern.basic import CoMine as alg
+#
+#             obj = alg.CoMine(iFile, minSup, minAllConf, sep)
+#
+#             obj.mine()
+#
+#             Rules = obj.getPatterns()
+#
+#             print("Total number of  Patterns:", len(Patterns))
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+from PAMI.correlatedPattern.basic import abstract as _ab
+import pandas as _pd
+from typing import List, Dict, Tuple, Union
+from deprecated import deprecated
+
+class _Node:
+    """
+    A class used to represent the node of correlatedPatternTree
+
+
+    :Attributes:
+
+        itemId : int
+            storing item of a node
+        counter : int
+            To maintain the support of node
+        parent : node
+            To maintain the parent of every node
+        child : list
+            To maintain the children of node
+        nodeLink : node
+            Points to the node with same itemId
+
+    :Methods:
+
+        getChild(itemName)
+            returns the node with same itemName from correlatedPatternTree
+    """
+
+    def __init__(self) -> None:
+        self.itemId = -1
+        self.counter = 1
+        self.parent = None
+        self.child = []
+        self.nodeLink = None
+
+    def getChild(self, id1) -> Union[None, '_Node']:
+        """
+        :param id1: give item id as input
+        :type id1: int
+        :return: the child node with the same itemId, or None if no such child exists
+        :rtype: _Node or None
+        """
+        for i in self.child:
+            if i.itemId == id1:
+                return i
+        return None
+
+
+class _Tree:
+    """
+    A class used to represent the correlatedPatternGrowth tree structure
+
+    :Attributes:
+
+        headerList : list
+            storing the list of items in the tree whose support satisfies minSup
+        mapItemNodes : dictionary
+            storing the nodes with same item name
+        mapItemLastNodes : dictionary
+            representing the map that indicates the last node for each item
+        root : Node
+            representing the root Node in a tree
+
+
+    :Methods:
+
+        createHeaderList(items,minSup)
+            keeps only the items whose support is no less than minSup
+        addTransaction(transaction)
+            creating transaction as a branch in correlatedPatternTree
+        fixNodeLinks(item,newNode)
+            To create the link for nodes with same item
+        printTree(Node)
+            gives the details of node in correlatedPatternGrowth tree
+        addPrefixPath(prefix,mapSupportBeta,minSup)
+           It takes the items in the prefix path whose support is >= minSup and constructs a subtree
+    """
+
+    def __init__(self) -> None:
+        self.headerList = []
+        self.mapItemNodes = {}
+        self.mapItemLastNodes = {}
+        self.root = _Node()
+
+    def addTransaction(self, transaction: List[int]) -> None:
+        """
+        Adding transaction into tree
+
+        :param transaction : it represents a single transaction in a database
+        :type transaction : list
+        :return: None
+        """
+
+        current = self.root
+        for i in transaction:
+            child = current.getChild(i)
+            if child is None:
+                newNode = _Node()
+                newNode.itemId = i
+                newNode.parent = current
+                current.child.append(newNode)
+                self.fixNodeLinks(i, newNode)
+                current = newNode
+            else:
+                child.counter += 1
+                current = child
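+
+    # A hedged illustration (added note, not part of the original source):
+    # adding the transactions [1, 2] and then [1, 3] grows the tree as
+    #     root -> 1(counter=2) -> 2(counter=1)
+    #                          -> 3(counter=1)
+    # a shared prefix reuses the existing node and only bumps its counter.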
+
+    def fixNodeLinks(self, item: int, newNode: '_Node') -> None:
+        """
+        Fixing node link for the newNode that inserted into correlatedPatternTree
+
+        :param item: it represents the item of newNode
+        :type item : int
+        :param newNode : it represents the newNode that inserted in correlatedPatternTree
+        :type newNode : Node
+        :return: None
+        """
+        if item in self.mapItemLastNodes:
+            lastNode = self.mapItemLastNodes[item]
+            lastNode.nodeLink = newNode
+        self.mapItemLastNodes[item] = newNode
+        if item not in self.mapItemNodes:
+            self.mapItemNodes[item] = newNode
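+
+    # A hedged note (added, not part of the original source): nodeLink chains
+    # every node that carries the same itemId; mapItemNodes stores the head of
+    # each chain and mapItemLastNodes its tail, so each new node is appended
+    # to its chain in O(1).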
+
+    def printTree(self, root: '_Node') -> None:
+        """
+        This method is to find the details of parent, children, and support of a Node
+
+        :param root: it represents the Node in correlatedPatternTree
+        :type root: Node
+        :return: None
+        """
+
+        if not root.child:
+            return
+        else:
+            for i in root.child:
+                print(i.itemId, i.counter, i.parent.itemId)
+                self.printTree(i)
+
+    def createHeaderList(self, mapSupport: Dict[int, int], minSup: int) -> None:
+        """
+        To create the headerList
+
+        :param mapSupport : it represents the items with their supports
+        :type mapSupport : dictionary
+        :param minSup : it represents the minSup
+        :type minSup : float
+        :return: None
+        """
+        
+        t1 = []
+        for x, y in mapSupport.items():
+            if y >= minSup:
+                t1.append(x)
+        itemSetBuffer = [k for k, v in sorted(mapSupport.items(), key=lambda x: x[1], reverse=True)]
+        self.headerList = [i for i in t1 if i in itemSetBuffer]
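+
+    # A hedged illustration (added note, not part of the original source):
+    # with mapSupport = {'a': 5, 'b': 2, 'c': 4} and minSup = 3, t1 becomes
+    # ['a', 'c'] and the final headerList is ['a', 'c'] -- items below minSup
+    # are dropped; note the list keeps t1's insertion order, since the
+    # membership test against the support-sorted buffer never reorders it.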
+
+    def addPrefixPath(self, prefix: List['_Node'], mapSupportBeta, minSup) -> None:
+        """
+        To construct the conditional tree with prefix paths of a node in correlatedPatternTree
+
+        :param prefix : it represents the prefix items of a Node
+        :type prefix : list
+        :param mapSupportBeta : it represents the items with their supports
+        :type mapSupportBeta : dictionary
+        :param minSup : to check whether the item meets minSup
+        :type minSup : float
+        :return: None
+        """
+        pathCount = prefix[0].counter
+        current = self.root
+        prefix.reverse()
+        for i in range(0, len(prefix) - 1):
+            pathItem = prefix[i]
+            if mapSupportBeta.get(pathItem.itemId) >= minSup:
+                child = current.getChild(pathItem.itemId)
+                if child is None:
+                    newNode = _Node()
+                    newNode.itemId = pathItem.itemId
+                    newNode.parent = current
+                    newNode.counter = pathCount
+                    current.child.append(newNode)
+                    current = newNode
+                    self.fixNodeLinks(pathItem.itemId, newNode)
+                else:
+                    child.counter += pathCount
+                    current = child
+
+
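+# A hedged note on addPrefixPath (added, not part of the original source):
+# the prefix list arrives leaf-first, so pathCount is the leaf's counter;
+# after reversing, every item except the leaf itself is re-inserted provided
+# its conditional support in mapSupportBeta meets minSup, and each touched
+# node's counter grows by pathCount -- this is how a conditional tree is
+# assembled during the recursive pattern-growth step.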
+
+[docs] +class CoMine(_ab._correlatedPatterns): + """ + About this algorithm + ==================== + + :Description: CoMine is one of the fundamental algorithm to discover correlated patterns in a transactional database. It is based on the traditional FP-Growth algorithm. This algorithm uses depth-first search technique to find all correlated patterns in a transactional database. + + :Reference: Lee, Y.K., Kim, W.Y., Cao, D., Han, J. (2003). CoMine: efficient mining of correlated patterns. In ICDM (pp. 581–584). + + :param iFile: str : + Name of the Input file to mine complete set of correlated patterns + :param oFile: str : + Name of the output file to store complete set of correlated patterns + :param minSup: int or float or str : + The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup is expressed in count. + :param minAllConf: float : + The user can specify minAllConf values within the range (0, 1). + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + :Attributes: + + memoryUSS : float + To store the total amount of USS memory consumed by the program + memoryRSS : float + To store the total amount of RSS memory consumed by the program + startTime:float + To record the start time of the mining process + endTime:float + To record the completion time of the mining process + minSup : int + The user given minSup + minAllConf: float + The user given minimum all confidence Ratio(should be in range of 0 to 1) + Database : list + To store the transactions of a database in list + mapSupport : Dictionary + To maintain the information of item and their frequency + lno : int + it represents the total no of transactions + tree : class + it represents the Tree class + itemSetCount : int + it represents the total no of patterns + finalPatterns : dict + it represents to store the patterns + itemSetBuffer : list + it represents the store the items in mining + maxPatternLength : int + it represents the constraint for pattern length + + Execution methods + ================= + + **Terminal command** + + .. code-block:: console + + Format: + + (.venv) $ python3 CoMine.py <inputFile> <outputFile> <minSup> <minAllConf> <sep> + + Example Usage: + + (.venv) $ python3 CoMine.py sampleTDB.txt output.txt 0.25 0.2 + + .. note:: minSup can be specified in support count or a value between 0 and 1. + + **Calling from a python program** + + .. code-block:: python + + from PAMI.correlatedPattern.basic import CoMine as alg + + obj = alg.CoMine(iFile, minSup, minAllConf,sep) + + obj.mine() + + patterns = obj.getPatterns() + + print("Total number of Patterns:", len(patterns)) + + obj.savePatterns(oFile) + + df = obj.getPatternsAsDataFrame() + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + Credits + ======= + + The complete program was written by B.Sai Chitra under the supervision of Professor Rage Uday Kiran. 
+ + """ + + _startTime = float() + _endTime = float() + _minSup = float() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _memoryUSS = float() + _memoryRSS = float() + _minAllConf = 0.0 + _Database = [] + _mapSupport = {} + _lno = 0 + _tree = str() + _itemSetBuffer = None + _fpNodeTempBuffer = [] + _itemSetCount = 0 + _maxPatternLength = 1000 + _sep = "\t" + + def __init__(self, iFile: Union[str, _pd.DataFrame], minSup: Union[int, float, str], minAllConf: float, sep: str="\t") ->None: + """ + param iFile: give the input file + type iFile: str or DataFrame or url + param minSup: minimum support + type minSup: int or float + param sep: Delimiter of input file + type sep: str + """ + + super().__init__(iFile, minSup, minAllConf, sep) + + def _creatingItemSets(self) -> None: + """ + Storing the complete transactions of the database/input file in a database variable + """ + self._Database = [] + if isinstance(self._iFile, _ab._pd.DataFrame): + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'Transactions' in i: + self._Database = self._iFile['Transactions'].tolist() + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line.strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + except IOError: + print("File Not Found") + quit() + + def _getRatio(self, prefix: List[int], prefixLength: int, s: int) -> float: + """ + A Function to get itemSet Ratio + + :param prefix:the path + :type prefix: list + :param prefixLength: length + :type prefixLength:int + :s :current ratio + :type s:float + :return: minAllConf of prefix + :rtype:float + """ + maximums = 0 + for ele in range(prefixLength): + i = prefix[ele] + if maximums < self._mapSupport.get(i): + maximums = self._mapSupport.get(i) + return s / maximums + + def _correlatedOneItem(self) -> None: + """ + Generating One correlated item + """ + self._mapSupport = {} + for i in self._Database: + for j in i: + if j not in self._mapSupport: + self._mapSupport[j] = 1 + else: + self._mapSupport[j] += 1 + + def _saveItemSet(self, prefix, prefixLength, support) -> None: + """ + To save the correlated patterns mined form correlatedPatternTree + + :param prefix: the correlated pattern + :type prefix: list + :param prefixLength : the length of a correlated pattern + :type prefixLength : int + :param support: the support of a pattern + :type support : int + :return: None + + The correlated patterns were stored in a global variable finalPatterns + """ + all_conf = self._getRatio(prefix, prefixLength, support) + if all_conf < self._minAllConf: + return + l = [] + for i in range(prefixLength): + l.append(prefix[i]) + self._itemSetCount += 1 + self._finalPatterns[tuple(l)] = [support, all_conf] + + def _convert(self, value: Union[int, float, str]) -> None: + """ + To convert the type of user specified minSup value + + :param value: user specified minSup value + :return: None + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self._Database) * value) + if type(value) is str: + if '.' 
in value: + value = float(value) + value = (len(self._Database) * value) + else: + value = int(value) + return value + + def _saveAllCombinations(self, tempBuffer, s, position, prefix, prefixLength) -> None: + """ + Generating all the combinations for items in single branch in correlatedPatternTree + + :param tempBuffer: items in a single branch + :type tempBuffer: list + :param s : support at leaf node of a branch + :param position : the length of a tempBuffer + :type position : int + :param prefix : it represents the list of leaf node + :type prefix : list + :param prefixLength : the length of prefix + :type prefixLength :int + :return: None + """ + max1 = 1 << position + for i in range(1, max1): + newPrefixLength = prefixLength + for j in range(position): + isSet = i & (1 << j) + if isSet > 0: + prefix.insert(newPrefixLength, tempBuffer[j].itemId) + newPrefixLength += 1 + self._saveItemSet(prefix, newPrefixLength, s) + + def _correlatedPatternGrowthGenerate(self, correlatedPatternTree, prefix, prefixLength, mapSupport) -> None: + """ + Mining the fp tree + + :param correlatedPatternTree: it represents the correlatedPatternTree + :type correlatedPatternTree: class Tree + :param prefix : it represents an empty list and store the patterns that are mined + :type prefix : list + :param prefixLength : the length of prefix + :type prefixLength :int + :param mapSupport : it represents the support of item + :type mapSupport : dictionary + :return: None + """ + + singlePath = True + position = 0 + s = 0 + if len(correlatedPatternTree.root.child) > 1: + singlePath = False + else: + currentNode = correlatedPatternTree.root.child[0] + while True: + if len(currentNode.child) > 1: + singlePath = False + break + self._fpNodeTempBuffer.insert(position, currentNode) + s = currentNode.counter + position += 1 + if len(currentNode.child) == 0: + break + currentNode = currentNode.child[0] + if singlePath is True: + self._saveAllCombinations(self._fpNodeTempBuffer, s, position, prefix, prefixLength) + else: + for i in reversed(correlatedPatternTree.headerList): + item = i + support = mapSupport[i] + betaSupport = support + prefix.insert(prefixLength, item) + self._saveItemSet(prefix, prefixLength + 1, betaSupport) + if prefixLength + 1 < self._maxPatternLength: + prefixPaths = [] + path = correlatedPatternTree.mapItemNodes.get(item) + mapSupportBeta = {} + while path is not None: + if path.parent.itemId != -1: + prefixPath = [] + prefixPath.append(path) + pathCount = path.counter + parent1 = path.parent + while parent1.itemId != -1: + prefixPath.append(parent1) + if mapSupportBeta.get(parent1.itemId) is None: + mapSupportBeta[parent1.itemId] = pathCount + else: + mapSupportBeta[parent1.itemId] = mapSupportBeta[parent1.itemId] + pathCount + parent1 = parent1.parent + prefixPaths.append(prefixPath) + path = path.nodeLink + treeBeta = _Tree() + for k in prefixPaths: + treeBeta.addPrefixPath(k, mapSupportBeta, self._minSup) + if len(treeBeta.root.child) > 0: + treeBeta.createHeaderList(mapSupportBeta, self._minSup) + self._correlatedPatternGrowthGenerate(treeBeta, prefix, prefixLength + 1, mapSupportBeta) + +
+[docs] + @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.") + def startMine(self) -> None: + """ + main method to start + """ + self.mine()
+ + +
+[docs] + def mine(self) -> None: + """ + main method to start + """ + self._startTime = _ab._time.time() + if self._iFile is None: + raise Exception("Please enter the file path or file name:") + self._creatingItemSets() + self._minSup = self._convert(self._minSup) + self._tree = _Tree() + self._finalPatterns = {} + self._correlatedOneItem() + self._mapSupport = {k: v for k, v in self._mapSupport.items() if v >= self._minSup} + _itemSetBuffer = [k for k, v in sorted(self._mapSupport.items(), key=lambda x: x[1], reverse=True)] + for i in self._Database: + _transaction = [] + for j in i: + if j in _itemSetBuffer: + _transaction.append(j) + _transaction.sort(key=lambda val: self._mapSupport[val], reverse=True) + self._tree.addTransaction(_transaction) + self._tree.createHeaderList(self._mapSupport, self._minSup) + if len(self._tree.headerList) > 0: + self._itemSetBuffer = [] + self._correlatedPatternGrowthGenerate(self._tree, self._itemSetBuffer, 0, self._mapSupport) + print("Correlated patterns were generated successfully using CoMine algorithm") + self._endTime = _ab._time.time() + self._memoryUSS = float() + self._memoryRSS = float() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss
+ + +
+[docs] + def getMemoryUSS(self) -> float: + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self) -> float: + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self) -> float: + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self) -> _pd.DataFrame: + """ + Storing final correlated patterns in a dataframe + + :return: returning correlated patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataframe = {} + data = [] + for a, b in self._finalPatterns.items(): + pat = " " + for i in a: + pat += str(i) + " " + data.append([pat, b[0], b[1]]) + dataframe = _ab._pd.DataFrame(data, columns=['Patterns', 'Support', 'Confidence']) + return dataframe
+ + +
+[docs] + def save(self, outFile) -> None: + """ + Complete set of correlated patterns will be saved into an output file + + :param outFile: name of the outputfile + :type outFile: file + :return: None + """ + self._oFile = outFile + writer = open(self._oFile, 'w+') + for x, y in self._finalPatterns.items(): + pat = "" + for i in x: + pat += str(i) + "\t" + patternsAndSupport = pat.strip() + ":" + str(y[0]) + ":" + str(y[1]) + writer.write("%s \n" % patternsAndSupport)
+ + +
+[docs] + def getPatterns(self) -> Dict[Tuple[int], List[Union[int, float]]]: + """ + Function to send the set of correlated patterns after completion of the mining process + + :return: returning correlated patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs] + def printResults(self) -> None: + """ + function to print the result after completing the process + + :return: None + """ + print("Total number of Correlated Patterns:", len(self.getPatterns())) + print("Total Memory in USS:", self.getMemoryUSS()) + print("Total Memory in RSS", self.getMemoryRSS()) + print("Total ExecutionTime in ms:", self.getRuntime())
+
+ + +if __name__ == "__main__": + _ap = str() + if len(_ab._sys.argv) == 5 or len(_ab._sys.argv) == 6: + if len(_ab._sys.argv) == 6: + _ap = CoMine(_ab._sys.argv[1], _ab._sys.argv[3], float(_ab._sys.argv[4]), _ab._sys.argv[5]) + if len(_ab._sys.argv) == 5: + _ap = CoMine(_ab._sys.argv[1], _ab._sys.argv[3], float(_ab._sys.argv[4])) + _ap.startMine() + _ap.mine() + print("Total number of Correlated-Frequent Patterns:", len(_ap.getPatterns())) + _ap.save(_ab._sys.argv[2]) + print("Total Memory in USS:", _ap.getMemoryUSS()) + print("Total Memory in RSS", _ap.getMemoryRSS()) + print("Total ExecutionTime in seconds:", _ap.getRuntime()) + else: + print("Error! The number of input parameters do not match the total number of parameters provided") +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/correlatedPattern/basic/CoMinePlus.html b/sphinx/_build/html/_modules/PAMI/correlatedPattern/basic/CoMinePlus.html new file mode 100644 index 000000000..861a0d9b3 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/correlatedPattern/basic/CoMinePlus.html @@ -0,0 +1,863 @@ + + + + + + PAMI.correlatedPattern.basic.CoMinePlus — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for PAMI.correlatedPattern.basic.CoMinePlus

+# CoMinePlus is one of the efficient algorithms to discover correlated patterns in a transactional database.
+#
+# **Importing this algorithm into a python program**
+# ----------------------------------------------------
+#
+#             from PAMI.correlatedPattern.basic import CoMinePlus as alg
+#
+#             obj = alg.CoMinePlus(iFile, minSup, minAllConf, sep)
+#
+#             obj.mine()
+#
+#             correlatedPattern = obj.getPatterns()
+#
+#             print("Total number of correlated Patterns:", len(correlatedPattern))
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
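+# A short note on the interestingness measure (added for clarity; this is the
+# standard all-confidence definition that the ratio computations in this file
+# implement):
+#
+#     allConf(X) = sup(X) / max{ sup(i) : i in X }
+#
+# For example, sup({a, b}) = 3 with sup(a) = 6 and sup(b) = 4 gives
+# allConf({a, b}) = 3 / 6 = 0.5, so {a, b} survives only when minAllConf <= 0.5.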
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+from PAMI.correlatedPattern.basic import abstract as _ab
+import pandas as _pd
+from typing import List, Dict, Tuple, Set, Union, Any, Optional, Generator
+from deprecated import deprecated
+
+
+class _Node:
+    """
+    A class used to represent the node of correlatedPatternTree
+
+    :Attributes:
+
+        itemId: int
+            storing item of a node
+        counter: int
+            To maintain the support of node
+        parent: node
+            To maintain the parent of every node
+        child: list
+            To maintain the children of node
+        nodeLink : node
+            Points to the node with same itemId
+
+    :Methods:
+
+        getChild(itemName)
+            returns the node with same itemName from correlatedPatternTree
+    """
+
+    def __init__(self) -> None:
+
+        self.itemId = -1
+        self.counter = 1
+        self.parent = None
+        self.child = []
+        self.nodeLink = None
+
+    def getChild(self, itemName: int) -> Union['_Node', None]:
+        """
+        Retrieving the child from the tree
+
+        :param itemName: item id of the child to look up
+        :type itemName: int
+        :return: the child node with the same itemName, or None if no such child exists
+        :rtype: _Node or None
+        """
+        for i in self.child:
+            if i.itemId == itemName:
+                return i
+        return None
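+
+    # Illustrative sketch (not part of the library source): getChild() is a
+    # linear scan, so for a node whose children carry itemIds [1, 2, 3],
+    # getChild(2) returns the matching child while getChild(9) returns None.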
+
+
+class _Tree:
+    """
+    A class used to represent the correlatedPatternGrowth tree structure
+
+    :Attributes:
+
+        headerList : list
+            storing the list of items in the tree, sorted in descending order of their supports
+        mapItemNodes : dictionary
+            storing the nodes with same item name
+        mapItemLastNodes : dictionary
+            representing the map that indicates the last node for each item
+        root : Node
+            representing the root Node in a tree
+
+    :Methods:
+
+        createHeaderList(items,minSup)
+            keeps only the items whose support is at least minSup and sorts them in descending order of support
+        addTransaction(transaction)
+            creating transaction as a branch in correlatedPatternTree
+        fixNodeLinks(item,newNode)
+            To create the link for nodes with same item
+        printTree(Node)
+            gives the details of node in correlatedPatternGrowth tree
+        addPrefixPath(prefix,mapSupportBeta,minSup)
+           It takes the items in a prefix path whose support is >= minSup and constructs a subtree
+    """
+
+    def __init__(self) -> None:
+        self.headerList = []
+        self.mapItemNodes = {}
+        self.mapItemLastNodes = {}
+        self.root = _Node()
+
+    def addTransaction(self, transaction: List[int]) -> None:
+        """
+        Adding a transaction into a tree
+
+        :param transaction: it represents a transaction in a database
+        :type transaction: list
+        :return: None
+        """
+
+        # This method takes a transaction as input and adds it as a branch of the tree
+        current = self.root
+        for i in transaction:
+            child = current.getChild(i)
+            if not child:
+                newNode = _Node()
+                newNode.itemId = i
+                newNode.parent = current
+                current.child.append(newNode)
+                self.fixNodeLinks(i, newNode)
+                current = newNode
+            else:
+                child.counter += 1
+                current = child
+
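+    # Illustrative sketch (not part of the library source): transactions that
+    # share a prefix share a branch, and only counters along that prefix grow:
+    #
+    #   tree = _Tree()
+    #   tree.addTransaction([1, 2, 3])
+    #   tree.addTransaction([1, 2, 4])
+    #   # resulting tree: root -> 1(2) -> 2(2) -> {3(1), 4(1)}
+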
+    def fixNodeLinks(self, item: int, newNode: _Node) -> None:
+        """
+        Fixing the node link for the newNode inserted into the correlatedPatternTree
+
+        :param item: it represents the item of newNode
+        :type item: int
+        :param newNode: it represents the newNode that was inserted in the correlatedPatternTree
+        :type newNode: Node
+        :return: None
+        """
+        if item in self.mapItemLastNodes.keys():
+            lastNode = self.mapItemLastNodes[item]
+            lastNode.nodeLink = newNode
+        self.mapItemLastNodes[item] = newNode
+        if item not in self.mapItemNodes.keys():
+            self.mapItemNodes[item] = newNode
+
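+    # Illustrative sketch (not part of the library source): the node links form
+    # a per-item singly linked list; after inserting nodes n1, n2, n3 for the
+    # same item, mapItemNodes[item] is n1 and n1.nodeLink -> n2 -> n3, which is
+    # exactly the chain the mining step walks via path.nodeLink.
+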
+    def printTree(self, root: _Node) -> None:
+        """
+        Print the details of Node in correlatedPatternTree
+
+        :param root: it represents the Node in correlatedPatternTree
+        :type root: Node
+        :return: None
+        """
+        # this method is used to print the details of the tree
+        if not root.child:
+            return
+        else:
+            for i in root.child:
+                print(i.itemId, i.counter, i.parent.itemId)
+                self.printTree(i)
+
+    def createHeaderList(self, mapSupport: Dict[int, int], minSup: int) -> None:
+        """
+        To create the headerList
+
+        :param mapSupport: it represents the items with their supports
+        :type mapSupport: dictionary
+        :param minSup: it represents the minimum support threshold
+        :type minSup: int
+        :return: None
+        """
+        # the correlatedPatternTree always maintains the header table to start the mining from leaf nodes
+        t1 = []
+        for x, y in mapSupport.items():
+            if y >= minSup:
+                t1.append(x)
+        itemSetBuffer = [k for k, v in sorted(mapSupport.items(), key=lambda val: val[1], reverse=True)]
+        # keep only the frequent items, preserving the descending support order
+        self.headerList = [i for i in itemSetBuffer if i in t1]
+
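+    # Illustrative sketch (not part of the library source): with
+    # mapSupport = {1: 4, 2: 6, 3: 1} and minSup = 2, the header list becomes
+    # [2, 1] (descending support; item 3 is filtered out), and the miner later
+    # iterates it in reverse so the least frequent item is processed first.
+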
+    def addPrefixPath(self, prefix: List[_Node], mapSupportBeta: Dict[int, int], minSup: int) -> None:
+        """
+        To construct the conditional tree with prefix paths of a node in correlatedPatternTree
+
+        :param prefix: it represents the prefix items of a Node
+        :type prefix: list
+        :param mapSupportBeta: it represents the items with their supports
+        :type mapSupportBeta: dictionary
+        :param minSup: to check whether the item meets minSup
+        :type minSup: int
+        :return: None
+        """
+        # this method is used to add prefix paths in conditional trees of correlatedPatternTree
+        pathCount = prefix[0].counter
+        current = self.root
+        prefix.reverse()
+        for i in range(0, len(prefix) - 1):
+            pathItem = prefix[i]
+            if mapSupportBeta.get(pathItem.itemId) >= minSup:
+                child = current.getChild(pathItem.itemId)
+                if not child:
+                    newNode = _Node()
+                    newNode.itemId = pathItem.itemId
+                    newNode.parent = current
+                    newNode.counter = pathCount
+                    current.child.append(newNode)
+                    current = newNode
+                    self.fixNodeLinks(pathItem.itemId, newNode)
+                else:
+                    child.counter += pathCount
+                    current = child
+
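+# Illustrative sketch (not part of the library source): given the prefix path
+# [c(2), b(5), a(7)] (leaf first), with a and b both satisfying minSup in
+# mapSupportBeta, addPrefixPath() reverses the path, skips the leaf item c
+# itself, and builds root -> a(2) -> b(2), since every replayed node carries
+# the leaf's pathCount of 2.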
+
+
+[docs] +class CoMinePlus(_ab._correlatedPatterns): + """ + About this algorithm + ==================== + + :Description: CoMinePlus is one of the efficient algorithm to discover correlated patterns in a transactional database. Using Item Support Intervals technique which is generating correlated patterns of higher order by combining only with items that have support within specified interval. + + :Reference: + Uday Kiran R., Kitsuregawa M. (2012) Efficient Discovery of Correlated Patterns in Transactional Databases Using Items’ Support Intervals. + In: Liddle S.W., Schewe KD., Tjoa A.M., Zhou X. (eds) Database and Expert Systems Applications. DEXA 2012. Lecture Notes in Computer Science, vol 7446. Springer, Berlin, Heidelberg. + https://doi.org/10.1007/978-3-642-32600-4_18 + + :param iFile: str : + Name of the Input file to mine complete set of correlated patterns + :param oFile: str : + Name of the output file to store complete set of correlated patterns + :param minSup: int or float or str : + The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup is expressed in count. + :param minAllConf: str : + Name of Neighbourhood file name + + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + + :Attributes: + + memoryUSS : float + To store the total amount of USS memory consumed by the program + memoryRSS : float + To store the total amount of RSS memory consumed by the program + startTime:float + To record the start time of the mining process + endTime:float + To record the completion time of the mining process + minSup : float + The user given minSup + minAllConf: float + The user given minimum all confidence Ratio (should be in range of 0 to 1) + Database : list + To store the transactions of a database in list + mapSupport : Dictionary + To maintain the information of item and their frequency + lno : int + it represents the total no of transactions + tree : class + it represents the Tree class + itemSetCount : int + it represents the total no of patterns + finalPatterns : dict + it represents to store the patterns + itemSetBuffer : list + it represents the store the items in mining + maxPatternLength : int + it represents the constraint for pattern length + + Execution methods + ================= + + **Terminal command** + + .. code-block:: console + + Format: + + (.venv) $ python3 CoMinePlus.py <inputFile> <outputFile> <minSup> <minAllConf> <sep> + + Example Usage: + + (.venv) $ python3 CoMinePlus.py sampleTDB.txt patterns.txt 0.4 0.5 ',' + + .. note:: minSup can be specified in support count or a value between 0 and 1. + + **Calling from a python program** + + .. code-block:: python + + from PAMI.correlatedPattern.basic import CoMinePlus as alg + + obj = alg.CoMinePlus(iFile, minSup, minAllConf, sep) + + obj.mine() + + correlatedPatterns = obj.getPatterns() + + print("Total number of correlated patterns:", len(correlatedPatterns)) + + obj.save(oFile) + + df = obj.getPatternsAsDataFrame() + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + + Credits + ======= + + The complete program was written by B.Sai Chitra under the supervision of Professor Rage Uday Kiran. 
+ + """ + + _startTime = float() + _endTime = float() + _minSup = str() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _minAllConf = 0.0 + _memoryUSS = float() + _memoryRSS = float() + _Database = [] + _mapSupport = {} + _lno = 0 + _tree = str() + _itemSetBuffer = None + _fpNodeTempBuffer = [] + _itemSetCount = 0 + _maxPatternLength = 1000 + _sep = "\t" + + def __init__(self, iFile: Union[str, _pd.DataFrame], minSup: Union[int, float, str], minAllConf: str, sep: str="\t") -> None: + """ + param iFile: input file name + + type iFile: str or DataFrame or url + param minSup: user-specified minimum support + type minSup: int or float + param minAllConf: user-specified minimum all confidence + type minAllConf: float + param sep: delimiter of input file + type sep : str + """ + super().__init__(iFile, minSup, minAllConf, sep) + + def _creatingItemSets(self) -> None: + """ + Storing the complete transactions of the database/input file in a database variable + """ + self._Database = [] + if isinstance(self._iFile, _ab._pd.DataFrame): + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'Transactions' in i: + self._Database = self._iFile['Transactions'].tolist() + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line.strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + except IOError: + print("File Not Found") + quit() + + def _correlatedOneItem(self) -> None: + """ + Generating One correlated items sets + """ + self._mapSupport = {} + for i in self._Database: + for j in i: + if j not in self._mapSupport: + self._mapSupport[j] = 1 + else: + self._mapSupport[j] += 1 + + def _saveItemSet(self, prefix: List[_Node], prefixLength: int, support: int, ratio: float) -> None: + """ + To save the correlated patterns mined form correlatedPatternTree + + :param prefix: the correlated pattern + :type prefix: list + :param prefixLength: the length of a correlated pattern + :type prefixLength: int + :param support: the support of a pattern + :type support: int + :param ratio: float + :return: None + """ + + sample = [] + for i in range(prefixLength): + sample.append(prefix[i]) + self._itemSetCount += 1 + self._finalPatterns[tuple(sample)] = [support, ratio] + + def _saveAllCombinations(self, tempBuffer: List[_Node], s: int, position: int, prefix: List[_Node], prefixLength: int) -> None: + """ + Generating all the combinations for items in single branch in correlatedPatternTree + + :param tempBuffer: items in a single branch + :type tempBuffer: list + :param s: support at leaf node of a branch + :param position: the length of a tempBuffer + :type position: int + :param prefix: it represents the list of leaf node + :type prefix: list + :param prefixLength: the length of prefix + :type prefixLength: int + :return: None + """ + max1 = 1 << position + for i in range(1, max1): + newPrefixLength = prefixLength + for j in range(position): + isSet = i & (1 << j) + if isSet > 0: + prefix.insert(newPrefixLength, tempBuffer[j].itemId) + newPrefixLength += 1 + ratio = s/self._mapSupport[self._getMaxItem(prefix, newPrefixLength)] + if ratio >= self._minAllConf: + self._saveItemSet(prefix, 
newPrefixLength, s, ratio) + + def _correlatedPatternGrowthGenerate(self, correlatedPatternTree: _Tree, prefix: List[_Node], prefixLength: int, mapSupport: Dict[int, int], minConf: float) -> None: + """ + Mining the fp tree + + :param correlatedPatternTree: it represents the correlatedPatternTree + :type correlatedPatternTree: class Tree + :param prefix: it represents an empty list and store the patterns that are mined + :type prefix: list + :param param prefixLength: the length of prefix + :type prefixLength: int + :param mapSupport : it represents the support of item + :type mapSupport : dictionary + :param minConf: representing the minimum confidence + :type minConf: float + :return: None + """ + singlePath = True + position = 0 + s = 0 + if len(correlatedPatternTree.root.child) > 1: + singlePath = False + else: + currentNode = correlatedPatternTree.root.child[0] + while True: + if len(currentNode.child) > 1: + singlePath = False + break + self._fpNodeTempBuffer.insert(position, currentNode) + s = currentNode.counter + position += 1 + if len(currentNode.child) == 0: + break + currentNode = currentNode.child[0] + if singlePath is True: + self._saveAllCombinations(self._fpNodeTempBuffer, s, position, prefix, prefixLength) + else: + for i in reversed(correlatedPatternTree.headerList): + item = i + support = mapSupport[i] + low = max(int(_ab._math.floor(mapSupport[i]*self._minAllConf)), self._minSup) + high = max(int(_ab._math.floor(mapSupport[i]/minConf)), self._minSup) + betaSupport = support + prefix.insert(prefixLength, item) + max1 = self._getMaxItem(prefix, prefixLength) + if self._mapSupport[max1] < self._mapSupport[item]: + max1 = item + ratio = support / self._mapSupport[max1] + if ratio >= self._minAllConf: + self._saveItemSet(prefix, prefixLength + 1, betaSupport, ratio) + if prefixLength + 1 < self._maxPatternLength: + prefixPaths = [] + path = correlatedPatternTree.mapItemNodes.get(item) + mapSupportBeta = {} + while path is not None: + if path.parent.itemId != -1: + prefixPath = [path] + pathCount = path.counter + parent1 = path.parent + if mapSupport.get(parent1.itemId) >= low and mapSupport.get(parent1.itemId) <= high: + while parent1.itemId != -1: + all_conf = int(support/max(mapSupport.get(parent1.itemId), support)) + if mapSupport.get(parent1.itemId) >= all_conf: + prefixPath.append(parent1) + if mapSupportBeta.get(parent1.itemId) is None: + mapSupportBeta[parent1.itemId] = pathCount + else: + mapSupportBeta[parent1.itemId] = mapSupportBeta[parent1.itemId] + pathCount + parent1 = parent1.parent + else: + break + prefixPaths.append(prefixPath) + path = path.nodeLink + treeBeta = _Tree() + for k in prefixPaths: + treeBeta.addPrefixPath(k, mapSupportBeta, self._minSup) + if len(treeBeta.root.child) > 0: + treeBeta.createHeaderList(mapSupportBeta, self._minSup) + self._correlatedPatternGrowthGenerate(treeBeta, prefix, prefixLength + 1, mapSupportBeta, minConf) + + def _convert(self, value: Union[int, float, str]) -> Union[int, float]: + """ + To convert the type of user specified minSup value + + :param value: user specified minSup value + :return: converted type + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self._Database) * value) + if type(value) is str: + if '.' in value: + value = (len(self._Database) * value) + else: + value = int(value) + return value + +
+[docs] + @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.") + def startMine(self) -> None: + """ + Main program to start the operation + """ + self.mine()
+ + +
+[docs] + def mine(self) -> None: + """ + Main program to start the operation + """ + + self._startTime = _ab._time.time() + if self._iFile is None: + raise Exception("Please enter the file path or file name:") + if self._minSup is None: + raise Exception("Please enter the Minimum Support") + self._creatingItemSets() + self._finalPatterns = {} + self._tree = _Tree() + self._minSup = self._convert(self._minSup) + self._correlatedOneItem() + self._mapSupport = {k: v for k, v in self._mapSupport.items() if v >= self._minSup} + _itemSetBuffer = [k for k, v in sorted(self._mapSupport.items(), key=lambda x: x[1], reverse=True)] + for i in self._Database: + _transaction = [] + for j in i: + if j in _itemSetBuffer: + _transaction.append(j) + _transaction.sort(key=lambda val: self._mapSupport[val], reverse=True) + self._tree.addTransaction(_transaction) + self._tree.createHeaderList(self._mapSupport, self._minSup) + if len(self._tree.headerList) > 0: + self._itemSetBuffer = [] + self._correlatedPatternGrowthGenerate(self._tree, self._itemSetBuffer, 0, self._mapSupport, self._minAllConf) + print("Correlated Frequent patterns were generated successfully using CorrelatedPatternGrowth algorithm") + self._endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryRSS = float() + self._memoryUSS = float() + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss
+ + +
+[docs] + def getMemoryUSS(self) -> float: + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self) -> float: + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._memoryRSS
+ + + def _getMaxItem(self, prefix: List[_Node], prefixLength: int) -> int: + maxItem = prefix[0] + for i in range(prefixLength): + if self._mapSupport[maxItem] < self._mapSupport[prefix[i]]: + maxItem = prefix[i] + return maxItem + +
+[docs] + def getRuntime(self) -> float: + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self) -> _pd.DataFrame: + """ + Storing final correlated patterns in a dataframe + + :return: returning correlated patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataframe = {} + data = [] + for a, b in self._finalPatterns.items(): + pat = " " + for i in a: + pat += str(i) + " " + data.append([pat, b[0], b[1]]) + dataframe = _ab._pd.DataFrame(data, columns=['Patterns', 'Support', 'Confidence']) + return dataframe
+ + +
+[docs] + def save(self, outFile: str) -> None: + """ + Complete set of correlated patterns will be loaded in to an output file + + :param outFile: name of the output file + :type outFile: csv file + :return: None + """ + self._oFile = outFile + writer = open(self._oFile, 'w+') + for x, y in self._finalPatterns.items(): + pattern = str() + for i in x: + pattern = pattern + i + "\t" + s1 = str(pattern.strip()) + ":" + str(y[0]) + ":" + str(y[1]) + writer.write("%s \n" % s1)
+ + +
+[docs] + def getPatterns(self) -> Dict[Tuple[str], List[Union[int, float]]]: + """ + Function to send the set of correlated patterns after completion of the mining process + + :return: returning correlated patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs] + def printResults(self) -> None: + """ + function to print the result after completing the process + """ + print("Total number of Correlated Patterns:", len(self.getPatterns())) + print("Total Memory in USS:", self.getMemoryUSS()) + print("Total Memory in RSS", self.getMemoryRSS()) + print("Total ExecutionTime in ms:", self.getRuntime())
+
+ + + +if __name__ == "__main__": + _ap = str() + if len(_ab._sys.argv) == 5 or len(_ab._sys.argv) == 6: + if len(_ab._sys.argv) == 6: + _ap = CoMinePlus(_ab._sys.argv[1], _ab._sys.argv[3], float(_ab._sys.argv[4]), _ab._sys.argv[5]) + if len(_ab._sys.argv) == 5: + _ap = CoMinePlus(_ab._sys.argv[1], _ab._sys.argv[3], float(_ab._sys.argv[4])) + _ap.startMine() + _ap.mine() + _correlatedPatterns = _ap.getPatterns() + print("Total number of Correlated-Frequent Patterns:", len(_ap.getPatterns())) + _ap.save(_ab._sys.argv[2]) + print("Total Memory in USS:", _ap.getMemoryUSS()) + print("Total Memory in RSS", _ap.getMemoryRSS()) + print("Total ExecutionTime in seconds:", _ap.getRuntime()) + else: + print("Error! The number of input parameters do not match the total number of parameters provided") +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/coveragePattern/basic/CMine.html b/sphinx/_build/html/_modules/PAMI/coveragePattern/basic/CMine.html new file mode 100644 index 000000000..6cb6a76bd --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/coveragePattern/basic/CMine.html @@ -0,0 +1,573 @@ + + + + + + PAMI.coveragePattern.basic.CMine — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for PAMI.coveragePattern.basic.CMine

+# The CMine algorithm aims to discover coverage patterns in transactional databases.
+#
+# **Importing this algorithm into a python program**
+# ----------------------------------------------------
+#
+#
+#             from PAMI.coveragePattern.basic import CMine as alg
+#
+#             obj = alg.CMine(iFile, minRF, minCS, maxOR, seperator)
+#
+#             obj.mine()
+#
+#             coveragePattern = obj.getPatterns()
+#
+#             print("Total number of coverage Patterns:", len(coveragePattern))
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+     
+"""
+
+
+
+
+from PAMI.coveragePattern.basic import abstract as _ab
+from typing import List, Dict, Tuple, Set, Union, Any, Generator
+from deprecated import deprecated
+
+
+[docs] +class CMine(_ab._coveragePatterns): + """ + About this algorithm + ==================== + + :Description: CMine algorithms aims to discover the coverage patterns in transactional databases. + + :Reference: Bhargav Sripada, Polepalli Krishna Reddy, Rage Uday Kiran: + Coverage patterns for efficient banner advertisement placement. WWW (Companion Volume) 2011: 131-132 + __https://dl.acm.org/doi/10.1145/1963192.1963259 + + :param iFile: str : + Name of the Input file to mine complete set of coverage patterns + :param oFile: str : + Name of the output file to store complete set of coverage patterns + :param minRF: str: + Controls the minimum number of transactions in which every item must appear in a database. + :param minCS: str: + Controls the minimum number of transactions in which at least one time within a pattern must appear in a database. + :param maxOR: str: + Controls the maximum number of transactions in which any two items within a pattern can reappear. + + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + :Attributes: + + startTime : float + To record the start time of the mining process + + endTime : float + To record the completion time of the mining process + + finalPatterns : dict + Storing the complete set of patterns in a dictionary variable + + memoryUSS : float + To store the total amount of USS memory consumed by the program + + memoryRSS : float + To store the total amount of RSS memory consumed by the program + + Database : list + To store the transactions of a database in list + + + Execution methods + ================= + + **Terminal command** + + .. code-block:: console + + Format: + + (.venv) $ python3 CMine.py <inputFile> <outputFile> <minRF> <minCS> <maxOR> <'\t'> + + Example Usage: + + (.venv) $ python3 CMine.py sampleTDB.txt patterns.txt 0.4 0.7 0.5 '\t' + + .. note: At the fixed minCS value, it can also be observed that the number of patterns increases as maxOR value increases. + + **Calling from a python program** + + .. code-block:: python + + from PAMI.coveragePattern.basic import CMine as alg + + obj = alg.CMine(iFile, minRF, minCS, maxOR, seperator) + + obj.mine() + + coveragePattern = obj.getPatterns() + + print("Total number of coverage Patterns:", len(coveragePattern)) + + obj.save(oFile) + + Df = obj.getPatternsAsDataFrame() + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + + Credits + ======= + + The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran. + + """ + + _startTime = float() + _endTime = float() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _sep = " " + _minCS = str() + _minRF = str() + _maxOR = str() + _memoryUSS = float() + _memoryRSS = float() + _Database = [] + _mapSupport = {} + _lno = 0 + + + def _convert(self, value) -> Union[int, float]: + """ + To convert the user specified minSup value + + :param value: user specified minSup value + :return: converted type + :rtype: Union[int, float] + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = value + if type(value) is str: + if '.' 
in value: + value = float(value) + else: + value = int(value) + return value + + def _creatingItemSets(self) -> None: + """ + Storing the complete transactions of the database/input file in a database variable + """ + self._Database = [] + self._mapSupport = {} + if isinstance(self._iFile, _ab._pd.DataFrame): + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'Transactions' in i: + self._Database = self._iFile['Transactions'].tolist() + + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + else: + try: + with open(self._iFile, 'r') as f: + for line in f: + self._lno += 1 + splitter = [i.rstrip() for i in line.split(self._sep)] + splitter = [x for x in splitter if x] + self._Database.append(splitter) + except IOError: + print("File Not Found") + +
+[docs] + def creatingCoverageItems(self) -> Dict[str, List[str]]: + """ + This function creates coverage items from _database. + + :return: coverageTidData that stores coverage items and their tid list. + :rtype: dict + """ + tidData = {} + self._lno = 0 + for transaction in self._Database: + self._lno = self._lno + 1 + for item in transaction[1:]: + if item not in tidData: + tidData[item] = [self._lno] + else: + tidData[item].append(self._lno) + coverageTidData = {k: v for k, v in tidData.items() if len(v) / len(self._Database) >= self._minRF} + coverageTidData = dict(sorted(coverageTidData.items(), reverse=True, key=lambda x: len(x[1]))) + return coverageTidData
+ + +
+[docs] + def tidToBitset(self,item_set: Dict[str, int]) -> Dict[str, int]: + """ + This function converts tid list to bitset. + + :param item_set: + :return: Dictionary + :rtype: dict + """ + bitset = {} + + for k,v in item_set.items(): + bitset[k] = 0b1 + bitset[k] = (bitset[k] << int(v[0])) | 0b1 + for i in range(1,len(v)): + diff = int(v[i]) - int(v[i-1]) + bitset[k] = (bitset[k] << diff) | 0b1 + bitset[k] = (bitset[k] << (self._lno - int(v[i]))) + return bitset
+ + +
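+    # Illustrative sketch (not part of the library source) of the bitset built
+    # by tidToBitset(), assuming the final shift runs once after the loop: for
+    # a 4-transaction database (self._lno = 4) and the tid list [1, 3], the
+    # value evolves 0b1 -> 0b11 -> 0b1101 -> 0b11010, i.e. a leading sentinel
+    # bit followed by one bit per transaction, set at transactions 1 and 3.
+    # The sentinel is why genPatterns() uses bin(tid).count("1") - 1.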
+[docs] + def genPatterns(self,prefix: Tuple[str, int],tidData: List[Tuple[str, int]]) -> None: + """ + This function generate coverage pattern about prefix. + + :param prefix: String + :param tidData: list + :return: None + """ + # variables to store coverage item set and + item_set = prefix[0] + + # Get the length of tidData + length = len(tidData) + for i in range(length): + tid = prefix[1] & tidData[i][1] + tid1 = prefix[1] | tidData[i][1] + andCount = bin(tid).count("1") - 1 + orCount = bin(tid1).count("1") - 1 + if orCount/len(self._Database) >= self._minCS and andCount / len(str(prefix[1])) <= self._maxOR: + coverageItem_set = item_set + '\t' + tidData[i][0] + if orCount / len(self._Database) >= self._minRF: + self._finalPatterns[coverageItem_set] = andCount + self.genPatterns((coverageItem_set,tid),tidData[i+1:length])
+ + +
+[docs] + def generateAllPatterns(self,coverageItems: Dict[str, int]) -> None: + """ + This function generates all coverage patterns. + + :param coverageItems: coverage items + :return: None + """ + tidData = list(coverageItems.items()) + length = len(tidData) + for i in range(length): + #print(i,tidData[i][0]) + self.genPatterns(tidData[i],tidData[i+1:length])
+ + +
+[docs] + @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.") + def startMine(self) -> None: + """ Main method to start """ + + self.mine()
+ + +
+[docs] + def mine(self) -> None: + """ Main method to start """ + + self._startTime = _ab._time.time() + if self._iFile is None: + raise Exception("Please enter the file path or file name:") + self._creatingItemSets() + self._minCS = self._convert(self._minCS) + self._minRF = self._convert(self._minRF) + self._maxOR = self._convert(self._maxOR) + coverageItems = self.creatingCoverageItems() + self._finalPatterns = {k: len(v) for k, v in coverageItems.items()} + coverageItemsBitset = self.tidToBitset(coverageItems) + self.generateAllPatterns(coverageItemsBitset) + self.save('output.txt') + self._endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = float() + self._memoryRSS = float() + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss + print("Coverage patterns were generated successfully using CMine algorithm")
+ + +
+[docs] + def getMemoryUSS(self) -> float: + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self) -> float: + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self) -> float: + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self) -> _ab._pd.DataFrame: + """ + Storing final coverage patterns in a dataframe + + :return: returning coverage patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataFrame = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a.replace('\t', ' '), b]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support']) + return dataFrame
+ + +
+[docs] + def save(self, outFile: str) -> None: + """ + Complete set of coverage patterns will be loaded in to an output file + + :param outFile: name of the outputfile + :type outFile: file + :return: None + """ + self._oFile = outFile + writer = open(self._oFile, 'w+') + for x, y in self._finalPatterns.items(): + patternsAndSupport = x.strip() + ":" + str(y) + writer.write("%s \n" % patternsAndSupport)
+ + +
+[docs] + def getPatterns(self) -> Dict[str, int]: + """ + Function to send the set of coverage patterns after completion of the mining process + + :return: returning coverage patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs] + def printResults(self) -> None: + """ + This function is used to print the result + """ + print("Total number of Coverage Patterns:", len(self.getPatterns())) + print("Total Memory in USS:", self.getMemoryUSS()) + print("Total Memory in RSS", self.getMemoryRSS()) + print("Total ExecutionTime in ms:", self.getRuntime())
+
+ + + +if __name__=="__main__": + _ap = str() + if len(_ab._sys.argv) == 7 or len(_ab._sys.argv) == 6: + if len(_ab._sys.argv) == 7: + _ap = CMine(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5], _ab._sys.argv[6]) + if len(_ab._sys.argv) == 6: + _ap = CMine(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5]) + _ap.startMine() + _ap.mine() + print("Total number of coverage Patterns:", len(_ap.getPatterns())) + _ap.save(_ab._sys.argv[2]) + print("Total Memory in USS:", _ap.getMemoryUSS()) + print("Total Memory in RSS", _ap.getMemoryRSS()) + print("Total ExecutionTime in ms:", _ap.getRuntime()) + else: + print("Error! The number of input parameters do not match the total number of parameters provided") +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/coveragePattern/basic/CPPG.html b/sphinx/_build/html/_modules/PAMI/coveragePattern/basic/CPPG.html new file mode 100644 index 000000000..fcc2e3275 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/coveragePattern/basic/CPPG.html @@ -0,0 +1,617 @@ + + + + + + PAMI.coveragePattern.basic.CPPG — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for PAMI.coveragePattern.basic.CPPG

+# The CPPG algorithm discovers coverage patterns in a transactional database.
+#
+# **Importing this algorithm into a python program**
+# -------------------------------------------------------
+#
+#
+#             from PAMI.coveragePattern.basic import CPPG as alg
+#
+#             obj = alg.CPPG(iFile, minRF, minCS, maxOR)
+#
+#             obj.mine()
+#
+#             coveragePattern = obj.getPatterns()
+#
+#             print("Total number of coverage Patterns:", len(coveragePattern))
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+     
+"""
+
+from PAMI.coveragePattern.basic import abstract as _ab
+import pandas as pd
+from typing import List, Dict, Tuple, Set, Union, Any, Generator
+from deprecated import deprecated
+
+
+_maxPer = float()
+_minSup = float()
+_lno = int()
+
+
+
+[docs] +class CPPG(_ab._coveragePatterns): + """ + + :Description: CPPG algorithm discovers coverage patterns in a transactional database. + + :Reference: Gowtham Srinivas, P.; Krishna Reddy, P.; Trinath, A. V.; Bhargav, S.; Uday Kiran, R. (2015). + Mining coverage patterns from transactional databases. Journal of Intelligent Information Systems, 45(3), 423–439. + https://link.springer.com/article/10.1007/s10844-014-0318-3 + + :param iFile: str : + Name of the Input file to mine complete set of coverage patterns + :param oFile: str : + Name of the output file to store complete set of coverage patterns + :param minRF: str: + Controls the minimum number of transactions in which every item must appear in a database. + :param minCS: str: + Controls the minimum number of transactions in which at least one time within a pattern must appear in a database. + :param maxOR: str: + Controls the maximum number of transactions in which any two items within a pattern can reappear. + + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + + :Attributes: + + startTime : float + To record the start time of the mining process + + endTime : float + To record the completion time of the mining process + + finalPatterns : dict + Storing the complete set of patterns in a dictionary variable + + memoryUSS : float + To store the total amount of USS memory consumed by the program + + memoryRSS : float + To store the total amount of RSS memory consumed by the program + + Database : list + To store the transactions of a database in list + + + **Methods to execute code on terminal** + ------------------------------------------- + + .. code-block:: console + + Format: + + (.venv) $ python3 CPPG.py <inputFile> <outputFile> <minRF> <minCS> <maxOR> <'\t'> + + Example Usage: + + (.venv) $ python3 CPPG.py sampleTDB.txt patterns.txt 0.4 0.7 0.5 ',' + + .. note:: minSup will be considered in percentage of database transactions + + + **Importing this algorithm into a python program** + -------------------------------------------------- + + .. code-block:: python + + from PAMI.coveragePattern.basic import CPPG as alg + + obj = alg.CPPG(iFile, minRF, minCS, maxOR) + + obj.mine() + + coveragePattern = obj.getPatterns() + + print("Total number of coverage Patterns:", len(coveragePattern)) + + obj.save(oFile) + + Df = obj.getPatternsAsDataFrame() + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + + **Credits:** + ------------------------- + The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran. 
+ + """ + _startTime = float() + _endTime = float() + _minRF = str() + _maxOR = str() + _minCS = str() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _sep = " " + _memoryUSS = float() + _memoryRSS = float() + _Database = [] + _rank = {} + _rankedUp = {} + _lno = 0 + + def _creatingItemSets(self) -> None: + """ + Storing the complete transactions of the database/input file in a database variable + """ + self._Database = [] + if isinstance(self._iFile, _ab._pd.DataFrame): + data, ts = [], [] + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'TS' in i: + ts = self._iFile['TS'].tolist() + if 'Transactions' in i: + data = self._iFile['Transactions'].tolist() + for i in range(len(data)): + tr = [ts[i][0]] + tr = tr + data[i] + self._Database.append(tr) + + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line.strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + except IOError: + print("File Not Found") + quit() + + + def _coverageOneItem(self) -> Tuple[Dict[str, List[int]], List[str]]: + """ Calculates the support of each item in the database and assign ranks to the items + by decreasing support and returns the frequent items list + + :returns: return the one-length periodic frequent patterns + :rtype: tuple + """ + data = {} + count = 0 + for tr in self._Database: + count += 1 + for i in range(len(tr)): + if tr[i] not in data: + data[tr[i]] = [count] + else: + data[tr[i]].append(count) + data = {k: v for k, v in data.items() if len(v)/len(self._Database) >= self._minRF} + pfList = [i for i in sorted(data, key=lambda k: len(data[k]), reverse=True)] + return data, pfList + + def _updateDatabases(self, dict1: Dict[str, List[str]]) -> List[List[str]]: + """ Remove the items which are not frequent from database and updates the database with rank of items + + :param dict1: frequent items with support + :type dict1: dict + :return: Sorted and updated transactions + :rtype: list + """ + list2 = [] + for tr in self._Database: + list1 = [] + for i in range(len(tr)): + if tr[i] in dict1: + list1.append(tr[i]) + list2.append([i for i in dict1 if i in list1]) + return list2 + + def _buildProjectedDatabase(self, data: List[List[str]], info: List[str]) -> Dict[str, List[List[str]]]: + """ To construct the projected database for each prefix + :param data: list of transactions with support per prefix + :type data: list + :param info: informatoin on list of transactions with support per prefix + :type info: str + :return: projected data + :rtype: dict + """ + proData = {} + for i in range(len(info)): + prefix = info[i+1:] + proData[info[i]] = [] + for j in data: + te = [] + if info[i] not in j: + for k in j: + if k in prefix: + te.append(k) + if len(te) > 0: + proData[info[i]].append(te) + for x, y in proData.items(): + print(x, y) + return proData + + def _generateFrequentPatterns(self, uniqueItems: List[str]) -> None: + """It will generate the combinations of frequent items + + :param uniqueItems :it represents the items with their respective transaction identifiers + + :type uniqueItems: list + + :return: returning transaction dictionary + + :rtype: dict + """ + 
+        new_freqList = []
+        for i in range(0, len(uniqueItems)):
+            item1 = uniqueItems[i]
+            i1_list = item1.split()
+            for j in range(i + 1, len(uniqueItems)):
+                item2 = uniqueItems[j]
+                i2_list = item2.split()
+                if i1_list[:-1] == i2_list[:-1]:
+                    interSet = set(self._finalPatterns[item1]).intersection(set(self._finalPatterns[item2]))
+                    union = set(self._finalPatterns[item1]).union(set(self._finalPatterns[item2]))
+                    if len(union)/len(self._Database) >= self._minCS and len(interSet)/len(self._finalPatterns[item1]) <= self._maxOR:
+                        newKey = item1 + " " + i2_list[-1]
+                        self._finalPatterns[newKey] = interSet
+                        new_freqList.append(newKey)
+                else:
+                    break
+
+        if len(new_freqList) > 0:
+            self._generateFrequentPatterns(new_freqList)
+
+    def _savePeriodic(self, itemSet: List[str]) -> str:
+        """ To convert the ranks of items into their original item names
+
+        :param itemSet: frequent patterns
+
+        :type itemSet: list
+
+        :return: frequent pattern with original item names
+
+        :rtype: string
+        """
+        t1 = str()
+        for i in itemSet:
+            t1 = t1 + self._rankedUp[i] + "\t"
+        return t1
+
+    def _convert(self, value: Union[int, float, str]) -> Union[int, float]:
+        """
+        To convert the given user specified value
+
+        :param value: user specified value
+
+        :type value: Union[int, float, str]
+
+        :return: converted value
+
+        :rtype: Union[int, float]
+        """
+        # int and float values are used as given; strings are parsed as
+        # float when they contain a decimal point, otherwise as int
+        if type(value) is str:
+            if '.' in value:
+                value = float(value)
+            else:
+                value = int(value)
+        return value
+
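+    # A minimal worked sketch (hypothetical toy database; the names are
+    # illustrative only) of the three measures the mining loop applies,
+    # mirroring the ratios computed in _coverageOneItem and
+    # _generateFrequentPatterns above:
+    #
+    #     database = [['a', 'b'], ['a', 'c'], ['b', 'c'], ['a']]
+    #     tids = {'a': {1, 2, 4}, 'b': {1, 3}, 'c': {2, 3}}
+    #
+    #     RF_a = len(tids['a']) / len(database)                # 0.75, kept if >= minRF
+    #     CS_ab = len(tids['a'] | tids['b']) / len(database)   # 1.0,  kept if >= minCS
+    #     OR_ab = len(tids['a'] & tids['b']) / len(tids['a'])  # 0.33, kept if <= maxOR
+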
+[docs] + @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.") + def startMine(self) -> None: + """ Mining process will start from this function + """ + self.mine()
+ + +
+[docs]
+    def mine(self) -> None:
+        """ Mining process will start from this function
+        """
+
+        self._startTime = _ab._time.time()
+        if self._iFile is None:
+            raise Exception("Please enter the file path or file name:")
+        if self._minRF is None:
+            raise Exception("Please enter the Relative Frequency")
+        if self._maxOR is None:
+            raise Exception("Please enter the Overlap Ratio")
+        if self._minCS is None:
+            raise Exception("Please enter the Coverage Ratio")
+        self._creatingItemSets()
+        self._minRF = self._convert(self._minRF)
+        self._maxOR = self._convert(self._maxOR)
+        self._minCS = self._convert(self._minCS)
+        if self._minRF > len(self._Database) or self._minCS > len(self._Database) or self._maxOR > len(self._Database):
+            raise Exception("Please enter the constraints in the range between 0 and 1")
+        generatedItems, pfList = self._coverageOneItem()
+        self._finalPatterns = {k: v for k, v in generatedItems.items()}
+        updatedDatabases = self._updateDatabases(pfList)
+        proData = self._buildProjectedDatabase(updatedDatabases, pfList)
+        for x, y in proData.items():
+            uniqueItems = [x]
+            for i in y:
+                for j in i:
+                    if j not in uniqueItems:
+                        uniqueItems.append(j)
+            self._generateFrequentPatterns(uniqueItems)
+        self._endTime = _ab._time.time()
+        process = _ab._psutil.Process(_ab._os.getpid())
+        self._memoryUSS = process.memory_full_info().uss
+        self._memoryRSS = process.memory_info().rss
+        print("Coverage patterns were generated successfully using CPPG algorithm")
+ + +
+[docs] + def getMemoryUSS(self) -> float: + """Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self) -> float: + """Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self) -> float: + """Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs]
+    def getPatternsAsDataFrame(self) -> pd.DataFrame:
+        """Storing the final coverage patterns in a dataframe
+
+        :return: returning coverage patterns in a dataframe
+        :rtype: pd.DataFrame
+        """
+
+        data = []
+        for a, b in self._finalPatterns.items():
+            # each pattern maps to its list of transaction ids; its support
+            # is the size of that list
+            data.append([a.replace('\t', ' '), len(b)])
+        dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support'])
+        return dataFrame
+ + +
+[docs]
+    def save(self, outFile: str) -> None:
+        """Complete set of coverage patterns will be loaded in to an output file
+
+        :param outFile: name of the output file
+        :type outFile: file
+        """
+        self._oFile = outFile
+        with open(self._oFile, 'w+') as writer:
+            for x, y in self._finalPatterns.items():
+                s1 = x.strip() + ":" + str(len(y))
+                writer.write("%s \n" % s1)
+ + +
+[docs]
+    def getPatterns(self) -> Dict[str, List[int]]:
+        """ Function to send the set of coverage patterns after completion of the mining process
+
+        :return: returning coverage patterns
+        :rtype: dict
+        """
+        return self._finalPatterns
+ + +
+[docs]
+    def printResults(self) -> None:
+        """
+        Function used to print the results
+        """
+        print("Total number of Coverage Patterns:", len(self.getPatterns()))
+        print("Total Memory in USS:", self.getMemoryUSS())
+        print("Total Memory in RSS:", self.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_ab._sys.argv) == 6 or len(_ab._sys.argv) == 7:
+        if len(_ab._sys.argv) == 7:
+            _ap = CPPG(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5], _ab._sys.argv[6])
+        if len(_ab._sys.argv) == 6:
+            _ap = CPPG(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5])
+        _ap.mine()
+        print("Total number of Coverage Patterns:", len(_ap.getPatterns()))
+        _ap.save(_ab._sys.argv[2])
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS:", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        print("Error! The number of input parameters does not match the total number of parameters provided")
+
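+# A hedged sketch of the expected input: one transaction per line in
+# sampleTDB.txt, items separated by the chosen separator (here ','):
+#
+#     a,b,c
+#     a,c
+#     b,d
+#
+#     (.venv) $ python3 CPPG.py sampleTDB.txt patterns.txt 0.4 0.7 0.5 ','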
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/DF2DB/DF2DB.html b/sphinx/_build/html/_modules/PAMI/extras/DF2DB/DF2DB.html new file mode 100644 index 000000000..f1bcffea7 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/DF2DB/DF2DB.html @@ -0,0 +1,235 @@ + + + + + + PAMI.extras.DF2DB.DF2DB — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for PAMI.extras.DF2DB.DF2DB

+# DF2DB converts a given dataframe into a sparse or dense transactional, temporal, or utility database.
+#
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#             from PAMI.extras.DF2DB import DF2DB as db
+#
+#             obj = db.DF2DB(idf, 16, ">=", "sparse")
+#
+#             obj.getTransactionalDatabase("outputFileName") # To create a transactional database
+#
+#             obj.getTemporalDatabase("outputFileName") # To create a temporal database
+#
+#             obj.getUtilityDatabase("outputFileName") # To create a utility database
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+from PAMI.extras.DF2DB.DenseFormatDF import *
+from PAMI.extras.DF2DB.SparseFormatDF import *
+import sys
+
+
+[docs]
+class DF2DB:
+    """
+    :Description: This class creates a database from a given DataFrame, using the threshold value and condition defined by the user.
+        It converts the DataFrame through either the sparse or the dense converter.
+
+    :Attributes:
+
+        :param inputDF: DataFrame :
+            It is a sparse or dense DataFrame
+        :param thresholdValue: int or float :
+            It is the threshold value applied to all items
+        :param condition: str :
+            It is the condition applied to all items
+        :param DFtype: str :
+            It is the DataFrame type. It should be sparse or dense. The default is sparse.
+
+
+    **Importing this algorithm into a python program**
+    --------------------------------------------------------
+    .. code-block:: python
+
+            from PAMI.extras.DF2DB import DF2DB as db
+
+            obj = db.DF2DB(idf, 16, ">=", "sparse")
+
+            obj.getTransactionalDatabase("outputFileName") # To create a transactional database
+
+            obj.getTemporalDatabase("outputFileName") # To create a temporal database
+
+            obj.getUtilityDatabase("outputFileName") # To create a utility database
+    """
+
+    def __init__(self, inputDF, thresholdValue, condition, DFtype='sparse') -> None:
+        self.inputDF = inputDF
+        self.thresholdValue = thresholdValue
+        self.condition = condition
+        self.DFtype = DFtype.lower()
+        # dispatch on the normalised DF type; a sparse DataFrame goes to the
+        # sparse converter and a dense one to the dense converter
+        if self.DFtype == 'sparse':
+            self.DF2DB = SparseFormatDF(self.inputDF, self.condition, self.thresholdValue)
+        elif self.DFtype == 'dense':
+            self.DF2DB = DenseFormatDF(self.inputDF, self.condition, self.thresholdValue)
+        else:
+            raise Exception('DF type should be sparse or dense')
+
+[docs] + def getTransactionalDatabase(self, outputFile) -> str: + """ + create transactional database and return outputFileName + :param outputFile: file name or path to store database + :type outputFile: str + :return: outputFile name + :rtype: str + """ + self.DF2DB.createTransactional(outputFile) + return self.DF2DB.getFileName()
+ + +
+[docs] + def getTemporalDatabase(self, outputFile) -> str: + """ + create temporal database and return outputFile name + :param outputFile: file name or path to store database + :type outputFile: str + :return: outputFile name + :rtype: str + """ + self.DF2DB.createTemporal(outputFile) + return self.DF2DB.getFileName()
+ + +
+[docs] + def getUtilityDatabase(self, outputFile) -> str: + """ + create utility database and return outputFile name + :param outputFile: file name or path to store database + :type outputFile: str + :return: outputFile name + :rtype: str + """ + self.DF2DB.createUtility(outputFile) + return self.DF2DB.getFileName()
+
+ + + +if __name__ == '__main__': + obj = DF2DB(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4]) + obj.getTransactionalDatabase(sys.argv[5]) +
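+# A hedged usage sketch (toy DataFrame; the file name is illustrative),
+# matching the __init__ signature (inputDF, thresholdValue, condition, DFtype):
+#
+#     import pandas as pd
+#     from PAMI.extras.DF2DB import DF2DB as db
+#
+#     dense = pd.DataFrame({'a': [10, 20], 'b': [5, 25]})
+#     obj = db.DF2DB(dense, 16, ">=", "dense")
+#     obj.getTransactionalDatabase("toyTransactional.txt")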
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/DF2DB/DenseFormatDF.html b/sphinx/_build/html/_modules/PAMI/extras/DF2DB/DenseFormatDF.html new file mode 100644 index 000000000..d62c8f7a1 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/DF2DB/DenseFormatDF.html @@ -0,0 +1,413 @@ + + + + + + PAMI.extras.DF2DB.DenseFormatDF — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.DF2DB.DenseFormatDF

+# DenseFormatDF converts a dense dataframe into different database types: transactional, temporal, multiple time series, and utility.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#             from PAMI.extras.DF2DB import DenseFormatDF as db
+#
+#             obj = db.DenseFormatDF(idf)
+#
+#             obj.convert2TransactionalDatabase("outputFileName", ">=", 16) # To create a transactional database
+#
+#             obj.convert2TemporalDatabase("outputFileName", ">=", 16) # To create a temporal database
+#
+#             obj.convert2MultipleTimeSeries(interval, "outputFileName", ">=", 16) # To create a multiple time series database
+#
+#             obj.convert2UtilityDatabase("outputFileName") # To create a utility database
+#
+#             obj.getFileName() # To get the file name of the database
+#
+
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+import operator
+from typing import Union
+
+condition_operator = {
+    '<': operator.lt,
+    '>': operator.gt,
+    '<=': operator.le,
+    '>=': operator.ge,
+    '==': operator.eq,
+    '!=': operator.ne
+}
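+
+# The mapping above lets each conversion method resolve the comparison once,
+# e.g. (toy values):
+#
+#     check = condition_operator['>=']
+#     check(20, 16)   # True  -> the item passes the threshold
+#     check(10, 16)   # False -> the item is dropped from the transaction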
+
+
+
+[docs]
+class DenseFormatDF:
+    """
+    :Description: This class creates a database from a dense DataFrame.
+
+    :Attributes:
+
+        :param inputDF: dataframe :
+            It is a dense DataFrame
+        :param condition: str :
+            It is the condition used to judge each value in the dataframe (passed to the convert2* methods)
+        :param thresholdValue: int or float :
+            User defined value (passed to the convert2* methods).
+
+
+    **Importing this algorithm into a python program**
+    --------------------------------------------------------
+    .. code-block:: python
+
+            from PAMI.extras.DF2DB import DenseFormatDF as db
+
+            obj = db.DenseFormatDF(idf)
+
+            obj.convert2TransactionalDatabase("outputFileName", ">=", 16) # To create a transactional database
+
+            obj.convert2TemporalDatabase("outputFileName", ">=", 16) # To create a temporal database
+
+            obj.convert2MultipleTimeSeries(interval, "outputFileName", ">=", 16) # To create a multiple time series database
+
+            obj.convert2UtilityDatabase("outputFileName") # To create a utility database
+
+            obj.getFileName() # To get the file name of the database
+    """
+
+    def __init__(self, inputDF) -> None:
+        self.inputDF = inputDF
+        self.tids = []
+        self.items = []
+        self.outputFile = ' '
+        self.items = list(self.inputDF.columns.values)
+        self.tids = list(self.inputDF.index)
+
+[docs] + def convert2TransactionalDatabase(self, outputFile: str, condition: str, thresholdValue: Union[int, float]) -> None: + """ + :Description: Create transactional data base + + :Attributes: + + :param outputFile: Write transactional database into outputFile + + :type outputFile: str + + :param condition: It is condition to judge the value in dataframe + + :type condition: str + + :param thresholdValue: User defined value. + + :type thresholdValue: Union[int, float] + """ + + + self.outputFile = outputFile + with open(outputFile, 'w') as f: + if condition not in condition_operator: + print('Condition error') + else: + for tid in self.tids: + transaction = [item for item in self.items if + condition_operator[condition](self.inputDF.at[tid, item], thresholdValue)] + if len(transaction) > 1: + f.write(f'{transaction[0]}') + for item in transaction[1:]: + f.write(f'\t{item}') + elif len(transaction) == 1: + f.write(f'{transaction[0]}') + else: + continue + f.write('\n')
+ + +
+[docs]
+    def convert2TemporalDatabase(self, outputFile: str, condition: str, thresholdValue: Union[int, float]) -> None:
+        """
+        :Description: Create a temporal database
+
+        :param outputFile: Write the temporal database into outputFile
+
+        :type outputFile: str
+
+        :param condition: It is the condition used to judge each value in the dataframe
+
+        :type condition: str
+
+        :param thresholdValue: User defined value.
+
+        :type thresholdValue: Union[int, float]
+        """
+
+        self.outputFile = outputFile
+        with open(outputFile, 'w') as f:
+            if condition not in condition_operator:
+                print('Condition error')
+            else:
+                for tid in self.tids:
+                    transaction = [item for item in self.items if
+                                   condition_operator[condition](self.inputDF.at[tid, item], thresholdValue)]
+                    if len(transaction) > 1:
+                        f.write(f'{tid + 1}')
+                        for item in transaction:
+                            f.write(f'\t{item}')
+                    elif len(transaction) == 1:
+                        f.write(f'{tid + 1}')
+                        f.write(f'\t{transaction[0]}')
+                    else:
+                        continue
+                    f.write('\n')
+ + +
+[docs]
+    def convert2MultipleTimeSeries(self, interval: int, outputFile: str, condition: str,
+                                   thresholdValue: Union[int, float]) -> None:
+        """
+        :Description: Create the multiple time series database.
+
+        :param outputFile: Write the multiple time series database into outputFile.
+
+        :type outputFile: str
+
+        :param interval: Breaks the given time series into intervals.
+
+        :type interval: int
+
+        :param condition: It is the condition used to judge each value in the dataframe
+
+        :type condition: str
+
+        :param thresholdValue: User defined value.
+
+        :type thresholdValue: int or float
+        """
+        self.outputFile = outputFile
+        with open(self.outputFile, 'w+') as writer:
+            count = 0
+            tids = []
+            items = []
+            values = []
+            for tid in self.tids:
+                count += 1
+                transaction = [item for item in self.items if
+                               condition_operator[condition](self.inputDF.at[tid, item], thresholdValue)]
+                for i in transaction:
+                    tids.append(count)
+                    items.append(i)
+                    values.append(self.inputDF.at[tid, i])
+                if count == interval:
+                    s1, s, ss = str(), str(), str()
+                    if len(values) > 0:
+
+                        for j in range(len(tids)):
+                            s1 = s1 + str(tids[j]) + '\t'
+                        for j in range(len(items)):
+                            s = s + items[j] + '\t'
+                        for j in range(len(values)):
+                            ss = ss + str(values[j]) + '\t'
+
+                    s2 = s1 + ':' + s + ':' + ss
+                    writer.write("%s\n" % s2)
+                    tids, items, values = [], [], []
+                    count = 0
+ + +
+[docs]
+    def convert2UncertainTransactional(self, outputFile: str, condition: str,
+                                       thresholdValue: Union[int, float]) -> None:
+        """
+        :Description: Create an uncertain transactional database, attaching a heuristic
+            uncertainty score to every selected item.
+
+        :param outputFile: Write the uncertain transactional database into outputFile
+
+        :type outputFile: str
+
+        :param condition: It is the condition used to judge each value in the dataframe
+
+        :type condition: str
+
+        :param thresholdValue: User defined value.
+
+        :type thresholdValue: Union[int, float]
+        """
+        self.outputFile = outputFile
+        with open(outputFile, 'w') as f:
+            if condition not in condition_operator:
+                print('Condition error')
+            else:
+                for tid in self.tids:
+                    transaction = [item for item in self.items if
+                                   condition_operator[condition](self.inputDF.at[tid, item], thresholdValue)]
+                    uncertain = [self.inputDF.at[tid, item] for item in self.items if
+                                 condition_operator[condition](self.inputDF.at[tid, item], thresholdValue)]
+                    if len(transaction) > 1:
+                        f.write(f'{transaction[0]}')
+                        for item in transaction[1:]:
+                            f.write(f'\t{item}')
+                        f.write(f':')
+                        for value in uncertain:
+                            # heuristic uncertainty score: grows with the value's distance from 25
+                            tt = 0.1 + 0.036 * abs(25 - value)
+                            tt = round(tt, 2)
+                            f.write(f'\t{tt}')
+                    elif len(transaction) == 1:
+                        f.write(f'{transaction[0]}')
+                        tt = 0.1 + 0.036 * abs(25 - uncertain[0])
+                        tt = round(tt, 2)
+                        f.write(f':{tt}')
+                    else:
+                        continue
+                    f.write('\n')
+ + +
+[docs] + def convert2UtilityDatabase(self, outputFile: str) -> None: + """ + :Description: Create the utility database. + + :param outputFile: Write utility database into outputFile + + :type outputFile: str + + :return: None + """ + + self.outputFile = outputFile + with open(self.outputFile, 'w') as f: + for tid in self.tids: + df = self.inputDF.loc[tid].dropna() + f.write(f'{df.index[0]}') + for item in df.index[1:]: + f.write(f'\t{item}') + f.write(f':{df.sum()}:') + f.write(f'{df.at[df.index[0]]}') + + for item in df.index[1:]: + f.write(f'\t{df.at[item]}') + f.write('\n')
+ + +
+[docs] + def getFileName(self) -> str: + """ + :return: outputFile name + :rtype: str + """ + + return self.outputFile
+
+ + +# Dataframes do not run from a terminal + +# if __name__ == '__main__': +# obj = DenseFormatDF(sys.argv[1], sys.argv[2], sys.argv[3]) +# obj.convert2TransactionalDatabase(sys.argv[4]) +# transactionalDB = obj.getFileName() +# print(transactionalDB) +
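+# A hedged usage sketch (toy DataFrame; file names are illustrative). The
+# condition and threshold are supplied per conversion call, not to __init__:
+#
+#     import pandas as pd
+#     df = pd.DataFrame({'a': [10, 30], 'b': [20, 5]})
+#     obj = DenseFormatDF(df)
+#     obj.convert2TransactionalDatabase("toyTrans.txt", ">=", 16)
+#     print(obj.getFileName())   # toyTrans.txt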
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/DF2DB/SparseFormatDF.html b/sphinx/_build/html/_modules/PAMI/extras/DF2DB/SparseFormatDF.html new file mode 100644 index 000000000..b3b99ed5f --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/DF2DB/SparseFormatDF.html @@ -0,0 +1,273 @@ + + + + + + PAMI.extras.DF2DB.SparseFormatDF — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.DF2DB.SparseFormatDF

+# SparseFormatDF converts a sparse dataframe into different database types: transactional, temporal, and utility.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#             from PAMI.extras.DF2DB import SparseFormatDF as db
+#
+#             obj = db.SparseFormatDF(idf, ">=", 16)
+#
+#             obj.createTransactional("outputFileName") # To create transactional database
+#
+#             obj.createTemporal("outputFileName") # To create temporal database
+#
+#             obj.createUtility("outputFileName") # To create utility database
+#
+#             obj.getFileName() # To get the file name of the database
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+import pandas as pd
+import sys
+
+
+[docs]
+class SparseFormatDF:
+    """
+    :Description: This class creates a database from a sparse DataFrame.
+
+    :Attributes:
+
+        :param inputDF: dataframe :
+            It is a sparse DataFrame
+        :param condition: str :
+            It is the condition used to judge each value in the dataframe
+        :param thresholdValue: int or float :
+            User defined value.
+
+    **Importing this algorithm into a python program**
+    --------------------------------------------------------
+    .. code-block:: python
+
+            from PAMI.extras.DF2DB import SparseFormatDF as db
+
+            obj = db.SparseFormatDF(idf, ">=", 16)
+
+            obj.createTransactional("outputFileName") # To create a transactional database
+
+            obj.createTemporal("outputFileName") # To create a temporal database
+
+            obj.createUtility("outputFileName") # To create a utility database
+
+            obj.getFileName() # To get the file name of the database
+    """
+
+    def __init__(self, inputDF, condition: str, thresholdValue: float) -> None:
+        self.inputDF = inputDF
+        self.condition = condition
+        self.thresholdValue = thresholdValue
+        self.outputFile = ''
+        if self.condition == '>':
+            self.df = self.inputDF.query(f'value > {self.thresholdValue}')
+        elif self.condition == '>=':
+            self.df = self.inputDF.query(f'value >= {self.thresholdValue}')
+        elif self.condition == '<=':
+            self.df = self.inputDF.query(f'value <= {self.thresholdValue}')
+        elif self.condition == '<':
+            self.df = self.inputDF.query(f'value < {self.thresholdValue}')
+        else:
+            # an unsupported condition would otherwise crash on the lines below
+            raise ValueError('Condition error: condition must be one of >, >=, <= or <')
+        self.df = self.df.drop(columns='value')
+        self.df = self.df.groupby('tid')['item'].apply(list)
+
+[docs] + def createTransactional(self, outputFile: str) -> None: + """ + Create transactional data base + :param outputFile: Write transactional data base into outputFile + :type outputFile: str + :return: None + """ + self.outputFile = outputFile + with open(self.outputFile, 'w') as f: + for line in self.df: + f.write(f'{line[0]}') + for item in line[1:]: + f.write(f',{item}') + f.write('\n')
+ + +
+[docs] + def createTemporal(self, outputFile: str) -> None: + """ + Create temporal data base + :param outputFile: Write temporal data base into outputFile + :type outputFile: str + :return: None + """ + + self.outputFile = outputFile + with open(self.outputFile, 'w') as f: + for tid in self.df.index: + f.write(f'{tid}') + for item in self.df[tid]: + f.write(f',{item}') + f.write('\n')
+ + +
+[docs] + def createUtility(self, outputFile: str) -> None: + """ + Create the utility database. + :param outputFile: Write utility database into outputFile + :type outputFile: str + :return: None + """ + + self.outputFile = outputFile + items = self.inputDF.groupby(level=0)['item'].apply(list) + values = self.inputDF.groupby(level=0)['value'].apply(list) + sums = self.inputDF.groupby(level=0)['value'].sum() + index = list(items.index) + with open(self.outputFile, 'w') as f: + for tid in index: + f.write(f'{items[tid][0]}') + for item in items[tid][1:]: + f.write(f'\t{item}') + f.write(f':{sums[tid]}:') + f.write(f'{values[tid][0]}') + for value in values[tid][1:]: + f.write(f'\t{value}') + f.write('\n')
+ + +
+[docs] + def getFileName(self) -> str: + + return self.outputFile
+
+
+
+if __name__ == '__main__':
+
+    obj = SparseFormatDF(sys.argv[1], sys.argv[2], float(sys.argv[3]))
+    obj.createTransactional(sys.argv[4])
+    print(obj.getFileName())
+
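+# A hedged usage sketch: the constructor's query on 'value' and the groupby on
+# 'tid' imply a long-format DataFrame with 'tid', 'item' and 'value' columns
+# (toy data below is illustrative):
+#
+#     import pandas as pd
+#     sparse = pd.DataFrame({'tid': [1, 1, 2], 'item': ['a', 'b', 'a'],
+#                            'value': [20, 10, 30]})
+#     obj = SparseFormatDF(sparse, '>=', 16)
+#     obj.createTransactional("toySparse.txt")   # writes: a / a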
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/DF2DB/createTDB.html b/sphinx/_build/html/_modules/PAMI/extras/DF2DB/createTDB.html new file mode 100644 index 000000000..890d076b1 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/DF2DB/createTDB.html @@ -0,0 +1,223 @@ + + + + + + PAMI.extras.DF2DB.createTDB — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for PAMI.extras.DF2DB.createTDB

+# createTDB creates a transactional database from a given dataframe.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#             from PAMI.extras.DF2DB import createTDB as ct
+#
+#             obj = ct.createTDB(idf, 16)
+#
+#             obj.save(oFile)
+#
+
+
+
+
+import sys
+
+__copyright__ = """
+ Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+import pandas as pd
+
+
+
+[docs]
+class createTDB:
+    """
+    :Description: This class will create a transactional database.
+
+    :param df: It represents the dataframe
+    :type df: pandas.DataFrame
+    :param threshold: It is the threshold value applied to all items.
+    :type threshold: int or float
+
+
+
+    **Importing this algorithm into a python program**
+    --------------------------------------------------------
+    .. code-block:: python
+
+            from PAMI.extras.DF2DB import createTDB as ct
+
+            obj = ct.createTDB(idf, 16)
+
+            obj.createTDB()
+
+            obj.save(oFile)
+
+    """
+    __startTime = float()
+    __endTime = float()
+    __memoryUSS = float()
+    __memoryRSS = float()
+    __Database = []
+    __finalPatterns = {}
+
+    def __init__(self, df, threshold):
+        self._df = df
+        self._threshold = int(threshold)
+        self._items = []
+        self._updatedItems = []
+
+[docs] + def createTDB(self): + """ + :Description: To Create transactional database + """ + i = self._df.columns.values.tolist() + if 'sid' in i: + self._items = self._df['sid'].tolist() + for i in self._items: + i = i.split() + self._updatedItems.append([j for j in i if int(j) > self._threshold])
+ + +
+[docs]
+    def save(self, outFile):
+        """
+        Complete set of updated transactions will be loaded in to an output file
+
+        :param outFile: name of the output file
+        :type outFile: csv file
+        """
+        self._oFile = outFile
+        with open(self._oFile, 'w+') as writer:
+            for x in self._updatedItems:
+                s = str()
+                for j in x:
+                    s = s + j + " "
+                writer.write("%s \n" % s)
+
+ + + +if __name__ == '__main__': + a = createTDB(sys.argv[1], sys.argv[3]) + a.createTDB() + a.save(sys.argv[2]) +
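+# A hedged input sketch: createTDB expects a DataFrame with a 'sid' column of
+# space-separated numeric strings; items not exceeding the threshold are
+# dropped (toy values are illustrative):
+#
+#     import pandas as pd
+#     df = pd.DataFrame({'sid': ['3 17 25', '9 40']})
+#     obj = createTDB(df, 16)
+#     obj.createTDB()          # keeps ['17', '25'] and ['40']
+#     obj.save("toyTDB.txt")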
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/DF2DB/denseDF2DBPlus.html b/sphinx/_build/html/_modules/PAMI/extras/DF2DB/denseDF2DBPlus.html new file mode 100644 index 000000000..5ff049c3d --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/DF2DB/denseDF2DBPlus.html @@ -0,0 +1,289 @@ + + + + + + PAMI.extras.DF2DB.denseDF2DBPlus — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.DF2DB.denseDF2DBPlus

+# DenseFormatDFPlus converts a dense dataframe into different database types: transactional, temporal, and utility, using per-item thresholds and conditions.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#             from PAMI.extras.DF2DB import DenseFormatDFPlus as db
+#
+#             obj = db.DenseFormatDFPlus(idf, thresholdConditionDF)
+#
+#             obj.createTransactional("outputFileName") # To create a transactional database
+#
+#             obj.createTemporal("outputFileName") # To create a temporal database
+#
+#             obj.createUtility("outputFileName") # To create a utility database
+#
+#             obj.getFileName() # To get the file name of the database
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+import pandas as pd
+import sys
+
+
+[docs]
+class DenseFormatDFPlus:
+    """
+    :Description: This class creates a database from a dense DataFrame, using a per-item threshold/condition DataFrame.
+
+    :Attributes:
+
+        :param inputDF: dataframe :
+            It is a dense DataFrame
+        :param thresholdConditionDF: dataframe :
+            A DataFrame indexed by item with 'threshold' and 'condition' columns, used to judge each value in the dataframe
+
+
+    **Importing this algorithm into a python program**
+    --------------------------------------------------------
+    .. code-block:: python
+
+            from PAMI.extras.DF2DB import DenseFormatDFPlus as db
+
+            obj = db.DenseFormatDFPlus(idf, thresholdConditionDF)
+
+            obj.createTransactional("outputFileName") # To create a transactional database
+
+            obj.createTemporal("outputFileName") # To create a temporal database
+
+            obj.createUtility("outputFileName") # To create a utility database
+
+            obj.getFileName() # To get the file name of the database
+
+    """
+
+    def __init__(self, inputDF, thresholdConditionDF) -> None:
+        self.inputDF = inputDF.T
+        self.thresholdConditionDF = thresholdConditionDF
+        self.tids = []
+        self.items = []
+        self.outputFile = ' '
+        self.items = list(self.inputDF.index)
+        self.tids = list(self.inputDF.columns)
+        self.df = pd.merge(self.inputDF, self.thresholdConditionDF, left_index=True, right_index=True)
+
+[docs] + def createTransactional(self, outputFile: str) -> None: + """ + Create transactional data base + :param outputFile: Write transactional data base into outputFile + :type outputFile: str + :return: None + """ + + self.outputFile = outputFile + with open(outputFile, 'w') as f: + for tid in self.tids: + transaction = [item for item in self.items if + (self.df.at[item, 'condition'] == '>' and self.df.at[item, tid] > self.df.at[item, 'threshold']) or + (self.df.at[item, 'condition'] == '>=' and self.df.at[item, tid] >= self.df.at[item, 'threshold']) or + (self.df.at[item, 'condition'] == '<=' and self.df.at[item, tid] <= self.df.at[item, 'threshold']) or + (self.df.at[item, 'condition'] == '<' and self.df.at[item, tid] < self.df.at[item, 'threshold']) or + (self.df.at[item, 'condition'] == '==' and self.df.at[item, tid] == self.df.at[item, 'threshold']) or + (self.df.at[item, 'condition'] == '!=' and self.df.at[item, tid] != self.df.at[item, 'threshold'])] + if len(transaction) > 1: + f.write(f'{transaction[0]}') + for item in transaction[1:]: + f.write(f',{item}') + elif len(transaction) == 1: + f.write(f'{transaction[0]}') + else: + continue + f.write('\n')
+ + + + +
+[docs] + def createTemporal(self, outputFile: str) -> None: + """ + Create temporal data base + :param outputFile: Write temporal data base into outputFile + :type outputFile: str + :return: None + """ + + self.outputFile = outputFile + with open(outputFile, 'w') as f: + for tid in self.tids: + transaction = [item for item in self.items if + (self.df.at[item, 'condition'] == '>' and self.df.at[item, tid] > self.df.at[item, 'threshold']) or + (self.df.at[item, 'condition'] == '>=' and self.df.at[item, tid] >= self.df.at[item, 'threshold']) or + (self.df.at[item, 'condition'] == '<=' and self.df.at[item, tid] <= self.df.at[item, 'threshold']) or + (self.df.at[item, 'condition'] == '<' and self.df.at[item, tid] < self.df.at[item, 'threshold']) or + (self.df.at[item, 'condition'] == '==' and self.df.at[item, tid] == self.df.at[item, 'threshold']) or + (self.df.at[item, 'condition'] == '!=' and self.df.at[item, tid] != self.df.at[item, 'threshold'])] + if len(transaction) > 1: + f.write(f'{tid}') + for item in transaction: + f.write(f',{item}') + elif len(transaction) == 1: + f.write(f'{tid}') + f.write(f',{transaction[0]}') + else: + continue + f.write('\n')
+ + +
+[docs] + def createUtility(self, outputFile: str) -> None: + """ + Create the utility data base. + :param outputFile: Write utility data base into outputFile + :type outputFile: str + :return: None + """ + + self.outputFile = outputFile + with open(self.outputFile, 'w') as f: + for tid in self.tids: + df = self.inputDF.loc[tid].dropna() + f.write(f'{df.index[0]}') + for item in df.index[1:]: + f.write(f'\t{item}') + f.write(f':{df.sum()}:') + f.write(f'{df.at[df.index[0]]}') + for item in df.index[1:]: + f.write(f'\t{df.at[item]}') + f.write('\n')
+ + +
+[docs] + def getFileName(self) -> str: + return self.outputFile
+
+
+
+if __name__ == '__main__':
+    a = DenseFormatDFPlus(sys.argv[1], sys.argv[3])
+    a.createTransactional(sys.argv[2])
+
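+# A hedged usage sketch: the lookups self.df.at[item, 'threshold'] and
+# self.df.at[item, 'condition'] imply thresholdConditionDF is indexed by item
+# with 'threshold' and 'condition' columns (toy data is illustrative):
+#
+#     import pandas as pd
+#     dense = pd.DataFrame({'a': [10, 30], 'b': [20, 5]})
+#     tc = pd.DataFrame({'threshold': [15, 10], 'condition': ['>=', '>']},
+#                       index=['a', 'b'])
+#     obj = DenseFormatDFPlus(dense, tc)
+#     obj.createTransactional("toyPlus.txt")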
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/DF2DB/denseDF2DB_dump.html b/sphinx/_build/html/_modules/PAMI/extras/DF2DB/denseDF2DB_dump.html new file mode 100644 index 000000000..6588a5cb6 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/DF2DB/denseDF2DB_dump.html @@ -0,0 +1,437 @@ + + + + + + PAMI.extras.DF2DB.denseDF2DB_dump — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.DF2DB.denseDF2DB_dump

+# denseDF2DB_dump converts a dense dataframe into different database types: transactional, temporal, and utility.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#             from PAMI.extras.DF2DB import denseDF2DB_dump as db
+#
+#             obj = db.DenseFormatDF(idf, ">=", 16)
+#
+#             obj.createTransactional("outputFileName") # To create a transactional database
+#
+#             obj.createTemporal("outputFileName") # To create a temporal database
+#
+#             obj.createUtility("outputFileName") # To create a utility database
+#
+#             obj.getFileName() # To get the file name of the database
+#
+
+
+
+
+__copyright__ = """
+ Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+import pandas as pd
+import sys
+
+[docs]
+class DenseFormatDF():
+    """
+    :Description: This class creates a database from a dense DataFrame.
+
+    :Attributes:
+
+        :param inputDF: dataframe :
+            It is a dense DataFrame
+        :param condition: str :
+            It is the condition used to judge each value in the dataframe
+        :param thresholdValue: int or float :
+            User defined value.
+
+    **Importing this algorithm into a python program**
+    --------------------------------------------------------
+    .. code-block:: python
+
+            from PAMI.extras.DF2DB import denseDF2DB_dump as db
+
+            obj = db.DenseFormatDF(idf, ">=", 16)
+
+            obj.createTransactional("outputFileName") # To create a transactional database
+
+            obj.createTemporal("outputFileName") # To create a temporal database
+
+            obj.createUtility("outputFileName") # To create a utility database
+
+            obj.getFileName() # To get the file name of the database
+
+    """
+
+    def __init__(self, inputDF, condition: str, thresholdValue: float) -> None:
+        self.inputDF = inputDF
+        self.condition = condition
+        self.thresholdValue = thresholdValue
+        self.tids = []
+        self.items = []
+        self.outputFile = ' '
+        self.items = list(self.inputDF.columns.values)[1:]
+        self.inputDF = self.inputDF.set_index('tid')
+        self.tids = list(self.inputDF.index)
+
+[docs]
+    def createTransactional(self, outputFile: str) -> None:
+        """
+        :Description: Create a transactional database
+
+        :param outputFile: Write the transactional database into outputFile
+
+        :type outputFile: str
+
+        :return: None
+
+        """
+        # one comparison function per supported condition; this avoids
+        # repeating the same loop six times and fixes the single-item case,
+        # which previously wrote the list repr instead of the item itself
+        checks = {'>': lambda a, b: a > b, '>=': lambda a, b: a >= b,
+                  '<=': lambda a, b: a <= b, '<': lambda a, b: a < b,
+                  '==': lambda a, b: a == b, '!=': lambda a, b: a != b}
+        self.outputFile = outputFile
+        if self.condition not in checks:
+            print('Condition error')
+            return
+        check = checks[self.condition]
+        with open(outputFile, 'w') as f:
+            for tid in self.tids:
+                transaction = [item for item in self.items if check(self.inputDF.at[tid, item], self.thresholdValue)]
+                if len(transaction) == 0:
+                    continue
+                f.write(f'{transaction[0]}')
+                for item in transaction[1:]:
+                    f.write(f',{item}')
+                f.write('\n')
+ + + + +
+[docs]
+    def createTemporal(self, outputFile: str) -> None:
+        """
+        :Description: Create a temporal database
+
+        :param outputFile: Write the temporal database into outputFile
+
+        :type outputFile: str
+
+        :return: None
+        """
+        # same dispatch table as createTransactional; each line is prefixed
+        # with its tid, and single-item writes no longer emit a list repr
+        checks = {'>': lambda a, b: a > b, '>=': lambda a, b: a >= b,
+                  '<=': lambda a, b: a <= b, '<': lambda a, b: a < b,
+                  '==': lambda a, b: a == b, '!=': lambda a, b: a != b}
+        self.outputFile = outputFile
+        if self.condition not in checks:
+            print('Condition error')
+            return
+        check = checks[self.condition]
+        with open(outputFile, 'w') as f:
+            for tid in self.tids:
+                transaction = [item for item in self.items if check(self.inputDF.at[tid, item], self.thresholdValue)]
+                if len(transaction) == 0:
+                    continue
+                f.write(f'{tid}')
+                for item in transaction:
+                    f.write(f',{item}')
+                f.write('\n')
+ + +
+[docs] + def createUtility(self, outputFile: str) -> None: + """ + + :Description: Create the utility database. + + :param outputFile: Write utility database into outputFile + + :type outputFile: str + + :return: None + + """ + + self.outputFile = outputFile + with open(self.outputFile, 'w') as f: + for tid in self.tids: + df = self.inputDF.loc[tid].dropna() + f.write(f'{df.index[0]}') + for item in df.index[1:]: + f.write(f'\t{item}') + f.write(f':{df.sum()}:') + f.write(f'{df.at[df.index[0]]}') + for item in df.index[1:]: + f.write(f'\t{df.at[item]}') + f.write('\n')
+ + +
+[docs] + def getFileName(self) -> str: + """ + :return: outputFile name + + :rtype: str + """ + + return self.outputFile
+
+
+
+
+if __name__ == '__main__':
+
+    obj = DenseFormatDF(sys.argv[1], sys.argv[2], float(sys.argv[3]))
+    obj.createTransactional(sys.argv[4])
+    print(obj.getFileName())
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/DF2DB/sparseDF2DBPlus.html b/sphinx/_build/html/_modules/PAMI/extras/DF2DB/sparseDF2DBPlus.html new file mode 100644 index 000000000..3eb4807ab --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/DF2DB/sparseDF2DBPlus.html @@ -0,0 +1,268 @@ + + + + + + PAMI.extras.DF2DB.sparseDF2DBPlus — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.DF2DB.sparseDF2DBPlus

+# SparseFormatDFPlus converts a sparse dataframe into different database types: transactional, temporal, and utility, using per-item thresholds and conditions.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#             from PAMI.extras.DF2DB import SparseFormatDFPlus as db
+#
+#             obj = db.SparseFormatDFPlus(idf, thresholdConditionDF)
+#
+#             obj.createTransactional("outputFileName") # To create a transactional database
+#
+#             obj.createTemporal("outputFileName") # To create a temporal database
+#
+#             obj.createUtility("outputFileName") # To create a utility database
+#
+#             obj.getFileName() # To get the file name of the database
+#
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+import pandas as pd
+import sys
+
+
+[docs]
+class SparseFormatDFPlus:
+    """
+    :Description: This class creates a database from a sparse DataFrame, using a per-item threshold/condition DataFrame.
+
+    :Attributes:
+
+        :param inputDF: dataframe :
+            It is a sparse DataFrame
+        :param thresholdConditionDF: dataframe :
+            A DataFrame indexed by item with 'threshold' and 'condition' columns, used to judge each value in the dataframe
+
+
+    **Importing this algorithm into a python program**
+    --------------------------------------------------------
+    .. code-block:: python
+
+            from PAMI.extras.DF2DB import SparseFormatDFPlus as db
+
+            obj = db.SparseFormatDFPlus(idf, thresholdConditionDF)
+
+            obj.createTransactional("outputFileName") # To create a transactional database
+
+            obj.createTemporal("outputFileName") # To create a temporal database
+
+            obj.createUtility("outputFileName") # To create a utility database
+
+            obj.getFileName() # To get the file name of the database
+    """
+
+    def __init__(self, inputDF, thresholdConditionDF) -> None:
+        self.inputDF = inputDF
+        self.thresholdConditionDF = thresholdConditionDF
+        self.outputFile = ''
+        self.df = pd.merge(self.inputDF, self.thresholdConditionDF, left_on='item', right_index=True)
+        self.df.query('(condition == ">" & value > threshold) | (condition == ">=" & value >= threshold) |'
+                      '(condition == "<=" & value <= threshold) | (condition == "<" & value < threshold) |'
+                      '(condition == "==" & value == threshold) | (condition == "!=" & value != threshold)',
+                      inplace=True)
+        self.df = self.df.drop(columns=['value', 'threshold', 'condition'])
+        self.df = self.df.groupby(level=0)['item'].apply(list)
+
+[docs] + def createTransactional(self, outputFile: str) -> None: + """ + Create transactional data base + :param outputFile: Write transactional data base into outputFile + :type outputFile: str + :return: None + """ + + self.outputFile = outputFile + with open(self.outputFile, 'w') as f: + for line in self.df: + f.write(f'{line[0]}') + for item in line[1:]: + f.write(f',{item}') + f.write('\n')
+ + +
+[docs] + def createTemporal(self, outputFile: str) -> None: + """ + Create temporal data base + :param outputFile: Write temporal data base into outputFile + :type outputFile: str + :return: None + """ + + self.outputFile = outputFile + with open(self.outputFile, 'w') as f: + for tid in self.df.index: + f.write(f'{tid}') + for item in self.df[tid]: + f.write(f',{item}') + f.write('\n')
+ + +
+[docs] + def createUtility(self, outputFile: str) -> None: + """ + Create the utility data base. + :param outputFile: Write utility data base into outputFile + :type outputFile: str + :return: None + """ + + self.outputFile = outputFile + items = self.inputDF.groupby(level=0)['item'].apply(list) + values = self.inputDF.groupby(level=0)['value'].apply(list) + sums = self.inputDF.groupby(level=0)['value'].sum() + index = list(items.index) + with open(self.outputFile, 'w') as f: + for tid in index: + f.write(f'{items[tid][0]}') + for item in items[tid][1:]: + f.write(f'\t{item}') + f.write(f':{sums[tid]}:') + f.write(f'{values[tid][0]}') + for value in values[tid][1:]: + f.write(f'\t{value}') + f.write('\n')
+ + + +
+[docs] + def getFileName(self) -> str: + + return self.outputFile
+
+
+
+
+if __name__ == '__main__':
+    obj = SparseFormatDFPlus(sys.argv[1], sys.argv[2])
+    obj.createTransactional(sys.argv[3])
+    print(obj.getFileName())
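+# A hedged usage sketch: the merge on left_on='item' and the query over
+# 'value', 'threshold' and 'condition' imply a long-format inputDF with 'item'
+# and 'value' columns plus a per-item threshold table (toy data is illustrative):
+#
+#     import pandas as pd
+#     sparse = pd.DataFrame({'item': ['a', 'b'], 'value': [20, 10]})
+#     tc = pd.DataFrame({'threshold': [15, 15], 'condition': ['>=', '>=']},
+#                       index=['a', 'b'])
+#     obj = SparseFormatDFPlus(sparse, tc)
+#     obj.createTransactional("toySparsePlus.txt")   # keeps item 'a' only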
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/calculateMISValues/usingBeta.html b/sphinx/_build/html/_modules/PAMI/extras/calculateMISValues/usingBeta.html new file mode 100644 index 000000000..f3736f8a7 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/calculateMISValues/usingBeta.html @@ -0,0 +1,285 @@ + + + + + + PAMI.extras.calculateMISValues.usingBeta — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.calculateMISValues.usingBeta

+# This code is used to calculate the multiple minimum support of items in the given database. The output can be stored in a file or as a dataframe.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#             from PAMI.extras.calculateMISValues import usingBeta as db
+#
+#             obj = db.usingBeta(iFile, 3, 16, "\t")
+#
+#             obj.getMISDataFrame() # To get the MIS values as a dataframe
+#
+#             obj.save(oFile)
+#
+
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+import sys as _sys
+import pandas as _pd
+import validators as _validators
+from urllib.request import urlopen as _urlopen
+
+
+[docs]
+class usingBeta():
+    """
+
+    :Description: This code is used to calculate the multiple minimum support (MIS) of items in the given database. The output can be stored in a file or as a dataframe.
+
+    :param iFile: str :
+        Name of the Input file to get the patterns as a DataFrame
+    :param beta: int :
+        Multiplier applied to each item's support when deriving its MIS value.
+    :param threshold: int :
+        The user can specify the threshold either as a count or as a proportion of the database size. If the program detects that the data type of the threshold is integer, then it treats the threshold as a count.
+    :param sep: str :
+        This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, users can override the default separator.
+
+    **Importing this algorithm into a python program**
+    --------------------------------------------------------
+    .. code-block:: python
+
+            from PAMI.extras.calculateMISValues import usingBeta as db
+
+            obj = db.usingBeta(iFile, 3, 16, "\t")
+
+            obj.save(oFile)
+
+    """
+
+    _iFile: str = ' '
+    _beta: int = int()
+    _sep: str = str()
+    _threshold: int = int()
+    _finalPatterns: dict = {}
+
+    def __init__(self, iFile: str, beta: int, threshold: int, sep: str):
+        self._iFile = iFile
+        self._beta = beta
+        self._threshold = threshold
+        self._sep = sep
+        self._lno = 0
+
+    def _creatingItemSets(self) -> None:
+        """
+        Storing the complete transactions of the database/input file in a database variable
+        """
+        self._Database = []
+        self._mapSupport = {}
+        if isinstance(self._iFile, _pd.DataFrame):
+            if self._iFile.empty:
+                print("its empty..")
+            i = self._iFile.columns.values.tolist()
+            if 'Transactions' in i:
+                self._Database = self._iFile['Transactions'].tolist()
+
+        if isinstance(self._iFile, str):
+            if _validators.url(self._iFile):
+                data = _urlopen(self._iFile)
+                for line in data:
+                    line = line.decode("utf-8").strip()
+                    temp = [i.rstrip() for i in line.split(self._sep)]
+                    temp = [x for x in temp if x]
+                    self._Database.append(temp)
+            else:
+                try:
+                    with open(self._iFile, 'r') as f:
+                        for line in f:
+                            self._lno += 1
+                            splitter = [i.rstrip() for i in line.split(self._sep)]
+                            splitter = [x for x in splitter if x]
+                            self._Database.append(splitter)
+                except IOError:
+                    print("File Not Found")
+
+    def _creatingFrequentItems(self) -> dict:
+        """
+        This function computes, for every item in _Database, its support scaled by beta.
+
+        :return: a dictionary mapping each item to len(tid list) * beta.
+        :rtype: dict
+        """
+        tidData = {}
+        self._lno = 0
+        for transaction in self._Database:
+            self._lno = self._lno + 1
+            for item in transaction:
+                if item not in tidData:
+                    tidData[item] = [self._lno]
+                else:
+                    tidData[item].append(self._lno)
+        frequentTidData = {k: len(v) * self._beta for k, v in tidData.items()}
+        return frequentTidData
+
+[docs]
+    def calculateMIS(self) -> None:
+        """
+        Assigns each item an MIS value: beta times its support, floored at the
+        user-specified threshold, i.e. MIS(item) = max(beta * support(item), threshold).
+        """
+        self._creatingItemSets()
+        frequentItems = self._creatingFrequentItems()
+        for x, y in frequentItems.items():
+            if y < self._threshold:
+                self._finalPatterns[x] = self._threshold
+            else:
+                self._finalPatterns[x] = y
+ + +
+[docs] + def getMISDataFrame(self) -> _pd.DataFrame: + """ + Storing items and its respective minimum support in a dataframe + :return: returning items and its respective minimum support in a dataframe + :rtype: pd.DataFrame + """ + + dataFrame = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a, b]) + dataFrame = _pd.DataFrame(data, columns=['Items', 'MIS']) + return dataFrame
+ + +
+[docs] + def save(self, outFile: str) -> None: + """ + Complete set of items and its respective minimum support values will be loaded in to an output file + :param outFile: name of the output file + :type outFile: csv file + :return: None + """ + self._oFile = outFile + writer = open(self._oFile, 'w+') + for x, y in self._finalPatterns.items(): + patternsAndSupport = x + "\t" + str(int(y)) + writer.write("%s \n" % patternsAndSupport)
+
+
+
+
+
+if __name__ == '__main__':
+    cd = usingBeta(_sys.argv[1], int(_sys.argv[3]), int(_sys.argv[4]), _sys.argv[5])
+    cd.calculateMIS()
+    cd.save(_sys.argv[2])
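+# A worked sketch of the MIS rule implemented in calculateMIS above
+# (toy numbers): MIS(item) = max(beta * support(item), threshold).
+#
+#     beta, threshold = 2, 10
+#     mis = lambda support: max(beta * support, threshold)
+#     mis(3)   # 3 * 2 = 6  < 10 -> MIS = 10
+#     mis(8)   # 8 * 2 = 16 >= 10 -> MIS = 16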
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/calculateMISValues/usingSD.html b/sphinx/_build/html/_modules/PAMI/extras/calculateMISValues/usingSD.html new file mode 100644 index 000000000..2d4a7245f --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/calculateMISValues/usingSD.html @@ -0,0 +1,284 @@ + + + + + + PAMI.extras.calculateMISValues.usingSD — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.calculateMISValues.usingSD

+# This code is used to calculate the multiple minimum support of items in the given database. The output can be stored in a file or as a dataframe.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#             from PAMI.extras.calculateMISValues import usingSD as db
+#
+#             obj = db.usingSD(iFile, 16, "\t")
+#
+#             obj.getPatterns("outputFileName") # To create patterns as dataframes
+#
+#             obj.save(oFile)
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+import sys as _sys
+import pandas as _pd
+import validators as _validators
+import statistics as _statistics
+from urllib.request import urlopen as _urlopen
+
+
+[docs] +class usingSD(): + """ + + :Description: This code is used to calculate the multiple minimum support (MIS) of items in the given database. Each item's MIS is derived by subtracting the standard deviation of all item supports from its support. The output can be stored in a file or as a dataframe. + + :param iFile: str : + Name of the Input file to mine complete set of frequent patterns + :param threshold: int : + The user can specify the threshold either as a count or as a proportion of the database size. If the program detects that the threshold is an integer, it treats the threshold as a count. + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, users can override the default separator. + + **Importing this algorithm into a python program** + -------------------------------------------------------- + .. code-block:: python + + from PAMI.extras.calculateMISValues import usingSD as db + + obj = db.usingSD(iFile, 16, "\t") + + obj.calculateMIS() + + obj.save(oFile) + """ + + _iFile: str = ' ' + _sd: int = int() + _sep: str = str() + _threshold: int = int() + _finalPatterns: dict = {} + + + def __init__(self, iFile: str, threshold: int, sep: str): + self._iFile = iFile + self._threshold = threshold + self._sep = sep + self._lno = 0 + + def _creatingItemSets(self) -> None: + """ + Storing the complete transactions of the database/input file in a database variable + """ + self._Database = [] + self._mapSupport = {} + if isinstance(self._iFile, _pd.DataFrame): + if self._iFile.empty: + print("input dataframe is empty") + i = self._iFile.columns.values.tolist() + if 'Transactions' in i: + self._Database = self._iFile['Transactions'].tolist() + + if isinstance(self._iFile, str): + if _validators.url(self._iFile): + data = _urlopen(self._iFile) + for line in data: + line = line.decode("utf-8").strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + else: + try: + with open(self._iFile, 'r') as f: + for line in f: + self._lno += 1 + splitter = [i.rstrip() for i in line.split(self._sep)] + splitter = [x for x in splitter if x] + self._Database.append(splitter) + except IOError: + print("File Not Found") + + def _creatingFrequentItems(self) -> tuple: + """ + This function computes the support of every item in _Database and subtracts the standard deviation of all supports. + :return: a tuple of (minimum support, dict mapping each item to its support minus the standard deviation of all supports). + :rtype: tuple + """ + tidData = {} + self._lno = 0 + for transaction in self._Database: + self._lno = self._lno + 1 + for item in transaction: + if item not in tidData: + tidData[item] = [self._lno] + else: + tidData[item].append(self._lno) + mini = min([len(k) for k in tidData.values()]) + sd = _statistics.stdev([len(k) for k in tidData.values()]) + frequentTidData = {k: len(v) - sd for k, v in tidData.items()} + return mini, frequentTidData + +
+[docs] + def calculateMIS(self) -> None: + """ + Assign every item an MIS value of support minus the standard deviation of all supports, falling back to the minimum support when that value drops below the threshold, and store it in _finalPatterns. + """ + self._creatingItemSets() + mini, frequentItems = self._creatingFrequentItems() + for x, y in frequentItems.items(): + if y < self._threshold: + self._finalPatterns[x] = mini + else: + self._finalPatterns[x] = y
+ + +
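+ # A worked example of the rule implemented above (the numbers are illustrative):
+ # with item supports [2, 4, 6], the sample standard deviation is 2, so the
+ # candidate MIS values are supports minus 2, i.e. [0, 2, 4]; with threshold = 3,
+ # candidates below 3 fall back to the minimum support (2), giving final
+ # MIS values [2, 2, 4].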
+[docs] + def getDataFrame(self) -> _pd.DataFrame: + """ + Storing items and their respective calculated minimum support values in a dataframe + :return: a dataframe of items and their respective calculated minimum support values + :rtype: pd.DataFrame + """ + + data = [] + for a, b in self._finalPatterns.items(): + data.append([a, b]) + dataFrame = _pd.DataFrame(data, columns=['Items', 'MIS']) + return dataFrame
+ + +
+[docs] + def save(self, outFile: str) -> None: + """ + The complete set of items and their respective calculated minimum support values will be written to an output file + :param outFile: name of the output file + :type outFile: str + :return: None + """ + self._oFile = outFile + with open(self._oFile, 'w+') as writer: + for x, y in self._finalPatterns.items(): + patternsAndSupport = x + "\t" + str(y) + writer.write("%s \n" % patternsAndSupport)
+
+ + +if __name__ == '__main__': + cd = usingSD(_sys.argv[1], int(_sys.argv[2]), _sys.argv[3]) + cd.calculateMIS() + cd.save(_sys.argv[4]) +
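+# A minimal usage sketch of this module (the file names 'sample.txt' and
+# 'mis.txt' are illustrative, not part of the library):
+#
+#     from PAMI.extras.calculateMISValues import usingSD as db
+#
+#     obj = db.usingSD('sample.txt', threshold=5, sep='\t')
+#     obj.calculateMIS()
+#     print(obj.getDataFrame())
+#     obj.save('mis.txt')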
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/dbStats/FuzzyDatabase.html b/sphinx/_build/html/_modules/PAMI/extras/dbStats/FuzzyDatabase.html new file mode 100644 index 000000000..c8646ae98 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/dbStats/FuzzyDatabase.html @@ -0,0 +1,575 @@ + + + + + + PAMI.extras.dbStats.FuzzyDatabase — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.dbStats.FuzzyDatabase

+# FuzzyDatabase is a class to get the statistics of a fuzzy database.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#             from PAMI.extras.dbStats import FuzzyDatabase as db
+#
+#             obj = db.FuzzyDatabase(iFile, "\t")
+#
+#             obj.run()
+#
+#             obj.printStats()
+#
+#             obj.save(oFile)
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+import sys
+import statistics
+import validators
+from urllib.request import urlopen
+import pandas as pd
+import PAMI.extras.graph.plotLineGraphFromDictionary as plt
+
+
+[docs] +class FuzzyDatabase: + """ + :Description: FuzzyDatabase is a class to get the statistics of a fuzzy database. + + :Attributes: + + inputFile : file + input file path + sep : str + separator in file. Default is tab space. + + :Methods: + + run() + execute readDatabase function + readDatabase() + read database from input file + getDatabaseSize() + get the size of database + getMinimumTransactionLength() + get the minimum transaction length + getAverageTransactionLength() + get the average transaction length. It is sum of all transaction length divided by database length. + getMaximumTransactionLength() + get the maximum transaction length + getStandardDeviationTransactionLength() + get the standard deviation of transaction length + getSortedListOfItemFrequencies() + get sorted list of item frequencies + getSortedListOfTransactionLength() + get sorted list of transaction length + save(data, outputFile) + store data into outputFile + getMinimumUtility() + get the minimum utility + getAverageUtility() + get the average utility + getMaximumUtility() + get the maximum utility + getSortedUtilityValuesOfItem() + get sorted utility values of each item + + **Importing this algorithm into a python program** + -------------------------------------------------------- + .. code-block:: python + + from PAMI.extras.dbStats import FuzzyDatabase as db + + obj = db.FuzzyDatabase(iFile, "\t") + + obj.run() + + obj.printStats() + + obj.save(oFile) + + + + """ + def __init__(self, inputFile: str, sep: str='\t'): + """ + :param inputFile: input file name or path + :type inputFile: str + :param sep: separator + :type sep: str + """ + self.inputFile = inputFile + self.database = {} + self.lengthList = [] + self.utility = {} + self.sep = sep +
+[docs] + def run(self) -> None: + self.readDatabase()
+ + +
+[docs] + def creatingItemSets(self) -> None: + """ + Storing the complete transactions of the database/input file in a database variable + """ + self.Database = [] + self.utilityValues = [] + if isinstance(self.inputFile, pd.DataFrame): + if self.inputFile.empty: + print("input dataframe is empty") + i = self.inputFile.columns.values.tolist() + if 'Transactions' in i: + self.Database = self.inputFile['Transactions'].tolist() + if 'Patterns' in i: + self.Database = self.inputFile['Patterns'].tolist() + if 'Utility' in i: + self.utilityValues = self.inputFile['Utility'].tolist() + + if isinstance(self.inputFile, str): + if validators.url(self.inputFile): + data = urlopen(self.inputFile) + for line in data: + line = line.decode("utf-8").strip() + temp = [i.rstrip() for i in line.split(":")] + transaction = [s for s in temp[0].split(self.sep)] + self.Database.append([x for x in transaction if x]) + # note: this URL branch reads utilities from the third ':'-separated field, + # whereas the file branch below reads them from the second + utilities = [int(s) for s in temp[2].split(self.sep)] + self.utilityValues.append([x for x in utilities if x]) + else: + try: + with open(self.inputFile, 'r', encoding='utf-8') as f: + for line in f: + line = line.strip() + temp = [i.rstrip() for i in line.split(":")] + transaction = [s for s in temp[0].split(self.sep)] + self.Database.append([x for x in transaction if x]) + utilities = [int(s) for s in temp[1].split(self.sep)] + self.utilityValues.append([x for x in utilities if x]) + except IOError: + print("File Not Found") + quit()
+ + +
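+ # A sketch of the input formats the two branches above assume (the sample
+ # line is illustrative, assuming sep=' '): the file branch parses
+ #     "a b c:2 3 5"
+ # by splitting on ':' into transaction ['a', 'b', 'c'] (field 0) and
+ # utilities [2, 3, 5] (field 1), one utility per item; the URL branch
+ # instead expects the utilities in field 2.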
+[docs] + def readDatabase(self) -> None: + """ + read database from input file and store into database and size of each transaction. + """ + numberOfTransaction = 0 + self.creatingItemSets() + for k in range(len(self.Database)): + numberOfTransaction += 1 + transaction = self.Database[k] + utilities = self.utilityValues[k] + self.database[numberOfTransaction] = transaction + for i in range(len(transaction)): + self.utility[transaction[i]] = self.utility.get(transaction[i],0) + self.utility[transaction[i]] += utilities[i] + self.lengthList = [len(s) for s in self.database.values()] + self.utility = {k: v for k, v in sorted(self.utility.items(), key=lambda x:x[1], reverse=True)}
+ + +
+[docs] + def getDatabaseSize(self) -> int: + """ + get the size of database + :return: dataset size + :rtype: int + """ + return len(self.database)
+ + +
+[docs] + def getTotalNumberOfItems(self) -> int: + """ + get the number of items in database. + :return: number of items + :rtype: int + """ + return len(self.getSortedListOfItemFrequencies())
+ + +
+[docs] + def getMinimumTransactionLength(self) -> int: + """ + get the minimum transaction length + :return: minimum transaction length + :rtype: int + """ + return min(self.lengthList)
+ + +
+[docs] + def getAverageTransactionLength(self) -> float: + """ + get the average transaction length. It is sum of all transaction length divided by database length. + :return: average transaction length + :rtype: float + """ + totalLength = sum(self.lengthList) + return totalLength / len(self.database)
+ + +
+[docs] + def getMaximumTransactionLength(self) -> int: + """ + get the maximum transaction length + :return: maximum transaction length + :rtype: int + """ + return max(self.lengthList)
+ + +
+[docs] + def getStandardDeviationTransactionLength(self) -> float: + """ + get the standard deviation transaction length + :return: standard deviation transaction length + :rtype: float + """ + return statistics.pstdev(self.lengthList)
+ + +
+[docs] + def getVarianceTransactionLength(self) -> float: + """ + get the variance transaction length + :return: variance transaction length + :rtype: float + """ + return statistics.variance(self.lengthList)
+ + +
+[docs] + def getNumberOfItems(self) -> int: + """ + get the number of items in database. + :return: number of items + :rtype: int + """ + return len(self.getSortedListOfItemFrequencies())
+ + +
+[docs] + def getSparsity(self) -> float: + """ + get the sparsity of database. sparsity is the fraction of empty cells in the transaction-item matrix. + :return: dataset sparsity + :rtype: float + """ + matrixSize = self.getDatabaseSize()*len(self.getSortedListOfItemFrequencies()) + return (matrixSize - sum(self.getSortedListOfItemFrequencies().values())) / matrixSize
+ + +
+[docs] + def getSortedListOfItemFrequencies(self) -> dict: + """ + get sorted list of item frequencies + :return: item frequencies + :rtype: dict + """ + itemFrequencies = {} + for tid in self.database: + for item in self.database[tid]: + itemFrequencies[item] = itemFrequencies.get(item, 0) + itemFrequencies[item] += 1 + return {k: v for k, v in sorted(itemFrequencies.items(), key=lambda x:x[1], reverse=True)}
+ + +
+[docs] + def getFrequenciesInRange(self) -> dict: + """ + get the number of items whose frequency falls in each of six equal-width frequency ranges + :return: mapping from the upper bound of each range to the number of items in it + :rtype: dict + """ + fre = self.getSortedListOfItemFrequencies() + rangeFrequencies = {} + maximum = max([i for i in fre.values()]) + values = [int(i*maximum/6) for i in range(1,6)] + va = len({key: val for key, val in fre.items() if val > 0 and val < values[0]}) + rangeFrequencies[values[0]] = va + for i in range(1,len(values)): + va = len({key: val for key, val in fre.items() if val < values[i] and val > values[i-1]}) + rangeFrequencies[values[i]] = va + return rangeFrequencies
+ + +
+[docs] + def getTransanctionalLengthDistribution(self) -> dict: + """ + get transaction length + :return: transactional length + :rtype: dict + """ + transactionLength = {} + for length in self.lengthList: + transactionLength[length] = transactionLength.get(length, 0) + transactionLength[length] += 1 + return {k: v for k, v in sorted(transactionLength.items(), key=lambda x:x[0])}
+ + +
+[docs] + def save(self, data: dict, outputFile: str) -> None: + """ + store data into outputFile + :param data: input data + :type data: dict + :param outputFile: output file name or path to store + :type outputFile: str + :return: None + """ + with open(outputFile, 'w') as f: + for key, value in data.items(): + f.write(f'{key}\t{value}\n')
+ + +
+[docs] + def getTotalUtility(self) -> int: + """ + get sum of utility + :return: total utility + :rtype: int + """ + return sum(list(self.utility.values()))
+ + +
+[docs] + def getMinimumUtility(self) -> int: + """ + get the minimum utility + :return: min utility + :rtype: int + """ + return min(list(self.utility.values()))
+ + +
+[docs] + def getAverageUtility(self) -> float: + """ + get the average utility + :return: average utility + :rtype: float + """ + return sum(list(self.utility.values())) / len(self.utility)
+ + +
+[docs] + def getMaximumUtility(self) -> int: + """ + get the maximum utility + :return: max utility + :rtype: int + """ + return max(list(self.utility.values()))
+ + +
+[docs] + def getSortedUtilityValuesOfItem(self) -> dict: + """ + get sorted utility value each item. key is item and value is utility of item + :return: sorted dictionary utility value of item + :rtype: dict + """ + return self.utility
+ + +
+[docs] + def printStats(self) -> None: + print(f'Database size : {self.getDatabaseSize()}') + print(f'Number of items : {self.getTotalNumberOfItems()}') + print(f'Minimum Transaction Size : {self.getMinimumTransactionLength()}') + print(f'Average Transaction Size : {self.getAverageTransactionLength()}') + print(f'Maximum Transaction Size : {self.getMaximumTransactionLength()}') + print(f'Minimum utility : {self.getMinimumUtility()}') + print(f'Average utility : {self.getAverageUtility()}') + print(f'Maximum utility : {self.getMaximumUtility()}') + print(f'Standard Deviation Transaction Size : {self.getStandardDeviationTransactionLength()}') + print(f'Variance : {self.getVarianceTransactionLength()}') + print(f'Sparsity : {self.getSparsity()}')
+ + +
+[docs] + def plotGraphs(self) -> None: + rangeFrequencies = self.getFrequenciesInRange() + print(rangeFrequencies) + transactionLength = self.getTransanctionalLengthDistribution() + plt.plotLineGraphFromDictionary(rangeFrequencies, 100, 'Frequency', 'No of items', 'frequency') + plt.plotLineGraphFromDictionary(transactionLength, 100, 'transaction length', 'transaction length', 'frequency')
+
+ + + +if __name__ == '__main__': + obj = FuzzyDatabase(sys.argv[1], sys.argv[2]) + obj.run() + obj.printStats() + obj.plotGraphs() +
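+# A minimal usage sketch of this module (the file names 'fuzzySample.txt' and
+# 'utilities.txt' are illustrative, not part of the library):
+#
+#     from PAMI.extras.dbStats import FuzzyDatabase as db
+#
+#     obj = db.FuzzyDatabase('fuzzySample.txt', sep='\t')
+#     obj.run()
+#     obj.printStats()
+#     obj.save(obj.getSortedUtilityValuesOfItem(), 'utilities.txt')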
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/dbStats/MultipleTimeSeriesFuzzyDatabaseStats.html b/sphinx/_build/html/_modules/PAMI/extras/dbStats/MultipleTimeSeriesFuzzyDatabaseStats.html new file mode 100644 index 000000000..8f50895b9 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/dbStats/MultipleTimeSeriesFuzzyDatabaseStats.html @@ -0,0 +1,526 @@ + + + + + + PAMI.extras.dbStats.MultipleTimeSeriesFuzzyDatabaseStats — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.dbStats.MultipleTimeSeriesFuzzyDatabaseStats

+# MultipleTimeSeriesFuzzyDatabaseStats is a class to get the statistics of a multiple time series fuzzy database.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#             from PAMI.extras.dbStats import MultipleTimeSeriesFuzzyDatabaseStats as db
+#
+#             obj = db.MultipleTimeSeriesFuzzyDatabaseStats(iFile, "\t")
+#
+#             obj.run()
+#
+#             obj.printStats()
+#
+#             obj.save(oFile)
+#
+
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+import statistics
+import pandas as pd
+import validators
+import numpy as np
+from urllib.request import urlopen
+import sys
+import PAMI.extras.graph.plotLineGraphFromDictionary as plt
+
+
+
+[docs] +class MultipleTimeSeriesFuzzyDatabaseStats: + """ + :Description: MultipleTimeSeriesFuzzyDatabaseStats is a class to get the statistics of a multiple time series fuzzy database. + + :Attributes: + + :param inputFile: file : + input file path + :param sep: str + separator in file. Default is tab space. + + :Methods: + + run() + execute readDatabase function + readDatabase() + read database from input file + getDatabaseSize() + get the size of database + getTotalNumberOfItems() + get the total number of items in a database + getMinimumTransactionLength() + get the minimum transaction length + getAverageTransactionLength() + get the average transaction length. It is sum of all transaction length divided by database length. + getMaximumTransactionLength() + get the maximum transaction length + getStandardDeviationTransactionLength() + get the standard deviation of transaction length + convertDataIntoMatrix() + Convert the database into matrix form to calculate the sparsity and density of a database + getSparsity() + get sparsity value of database + getDensity() + get density value of database + getSortedListOfItemFrequencies() + get sorted list of item frequencies + getSortedListOfTransactionLength() + get sorted list of transaction length + save(data, outputFile) + store data into outputFile + printStats() + To print all the stats of the database + plotGraphs() + To plot the graphs of the frequency distribution of items and the transaction length distribution in the database + + + **Importing this algorithm into a python program** + -------------------------------------------------------- + .. code-block:: python + + from PAMI.extras.dbStats import MultipleTimeSeriesFuzzyDatabaseStats as db + + obj = db.MultipleTimeSeriesFuzzyDatabaseStats(iFile, "\t") + + obj.run() + + obj.save(oFile) + + obj.printStats() + + """ + + def __init__(self, inputFile: str, sep: str='\t'): + """ + :param inputFile: input file name or path + :type inputFile: str + :param sep: separator + :type sep: str + """ + self.inputFile = inputFile + self.lengthList = [] + self.sep = sep + self.database = {} + self.itemFrequencies = {} +
+[docs] + def run(self) -> None: + self.readDatabase()
+ + +
+[docs] + def readDatabase(self) -> None: + """ + read database from input file and store into database and size of each transaction. + """ + self._transactions, self._fuzzyValues, self._Database, self._ts = [], [], [], [] + numberOfTransaction = 0 + if isinstance(self.inputFile, pd.DataFrame): + if self.inputFile.empty: + print("input dataframe is empty") + i = self.inputFile.columns.values.tolist() + if 'tid' in i and 'Transactions' in i: + self.database = self.inputFile.set_index('tid').T.to_dict(orient='records')[0] + if 'tid' in i and 'Patterns' in i: + self.database = self.inputFile.set_index('tid').T.to_dict(orient='records')[0] + if isinstance(self.inputFile, str): + if validators.url(self.inputFile): + data = urlopen(self.inputFile) + for line in data: + numberOfTransaction += 1 + line = line.decode("utf-8").strip() + temp = [i.rstrip() for i in line.split(self.sep)] + temp = [x for x in temp if x] + self.database[numberOfTransaction] = temp + else: + try: + with open(self.inputFile, 'r', encoding='utf-8') as f: + for line in f: + line = line.strip() + parts = line.split(":") + numberOfTransaction += 1 + parts[0] = parts[0].strip() + parts[1] = parts[1].strip() + parts[2] = parts[2].strip() + times = parts[0].split(self.sep) + items = parts[1].split(self.sep) + quantities = parts[2].split(self.sep) + _time = [x for x in times if x] + items = [x for x in items if x] + quantities = [float(x) for x in quantities if x] + tempList = [] + for k in range(len(_time)): + ite = "(" + _time[k] + "," + items[k] + ")" + tempList.append(ite) + self._ts.append(_time) + self._transactions.append(tempList) + self._fuzzyValues.append(quantities) + self.database[numberOfTransaction] = tempList + except IOError: + print("File Not Found") + quit() + self.lengthList = [len(s) for s in self._transactions]
+ + +
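+ # A sketch of the line format the file branch above assumes (the sample
+ # line is illustrative, assuming sep=' '): a line such as
+ #     "1 2:a b:0.4 0.7"
+ # is split on ':' into timestamps ['1', '2'], items ['a', 'b'] and fuzzy
+ # values [0.4, 0.7], and is stored as the labelled transaction
+ #     ['(1,a)', '(2,b)'].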
+[docs] + def getDatabaseSize(self) -> int: + """ + get the size of database + :return: dataset size + :rtype: int + """ + return len(self.database)
+ + +
+[docs] + def getTotalNumberOfItems(self) -> int: + """ + get the number of items in database. + :return: number of items + :rtype: int + """ + return len(self.getSortedListOfItemFrequencies())
+ + +
+[docs] + def getMinimumTransactionLength(self) -> int: + """ + get the minimum transaction length + :return: minimum transaction length + :rtype: int + """ + return min(self.lengthList)
+ + +
+[docs] + def getAverageTransactionLength(self) -> float: + """ + get the average transaction length. It is sum of all transaction length divided by database length. + :return: average transaction length + :rtype: float + """ + totalLength = sum(self.lengthList) + return totalLength / len(self.database)
+ + +
+[docs] + def getMaximumTransactionLength(self) -> int: + """ + get the maximum transaction length + :return: maximum transaction length + :rtype: int + """ + return max(self.lengthList)
+ + +
+[docs] + def getStandardDeviationTransactionLength(self) -> float: + """ + get the standard deviation transaction length + :return: standard deviation transaction length + :rtype: float + """ + return statistics.pstdev(self.lengthList)
+ + +
+[docs] + def getVarianceTransactionLength(self) -> float: + """ + get the variance transaction length + :return: variance transaction length + :rtype: float + """ + return statistics.variance(self.lengthList)
+ + +
+[docs] + def getNumberOfItems(self) -> int: + """ + get the number of items in database. + :return: number of items + :rtype: int + """ + return len(self.getSortedListOfItemFrequencies())
+ + +
+[docs] + def convertDataIntoMatrix(self) -> np.ndarray: + singleItems = self.getSortedListOfItemFrequencies() + itemsets = {} + for i in self.database: + for item in singleItems: + if item in itemsets: + if item in self.database[i]: + itemsets[item].append(1) + else: + itemsets[item].append(0) + else: + if item in self.database[i]: + itemsets[item] = [1] + else: + itemsets[item] = [0] + data = list(itemsets.values()) + an_array = np.array(data) + return an_array
+ + +
+[docs] + def getSparsity(self) -> float: + """ + get the sparsity of database. sparsity is percentage of 0 of database. + :return: database sparsity + :rtype: float + """ + big_array = self.convertDataIntoMatrix() + n_zeros = np.count_nonzero(big_array == 0) + return (n_zeros / big_array.size)
+ + +
+[docs] + def getDensity(self) -> float: + """ + get the density of database. density is the fraction of non-zero entries of the database. + :return: database density + :rtype: float + """ + big_array = self.convertDataIntoMatrix() + n_nonzero = np.count_nonzero(big_array != 0) + return (n_nonzero / big_array.size)
+ + +
+[docs] + def getSortedListOfItemFrequencies(self) -> dict: + """ + get sorted list of item frequencies + :return: item frequencies + :rtype: dict + """ + itemFrequencies = {} + for line in range(len(self._transactions)): + times = self._ts[line] + items = self._transactions[line] + quantities = self._fuzzyValues[line] + for i in range(0, len(items)): + item = items[i] + if item in itemFrequencies: + itemFrequencies[item] += quantities[i] + else: + itemFrequencies[item] = quantities[i] + self.itemFrequencies = {k: v for k, v in sorted(itemFrequencies.items(), key=lambda x: x[1], reverse=True)} + return self.itemFrequencies
+ + +
+[docs] + def getFrequenciesInRange(self) -> dict: + """ + get the number of items whose frequency falls in each of six equal-width frequency ranges + :return: mapping from the upper bound of each range to the number of items in it + :rtype: dict + """ + fre = self.getSortedListOfItemFrequencies() + rangeFrequencies = {} + maximum = max([i for i in fre.values()]) + values = [int(i*maximum/6) for i in range(1,6)] + va = len({key: val for key, val in fre.items() if val > 0 and val < values[0]}) + rangeFrequencies[values[0]] = va + for i in range(1,len(values)): + va = len({key: val for key, val in fre.items() if val < values[i] and val > values[i-1]}) + rangeFrequencies[values[i]] = va + return rangeFrequencies
+ + +
+[docs] + def getTransanctionalLengthDistribution(self) -> dict: + """ + get transaction length + :return: transactional length + :rtype: dict + """ + transactionLength = {} + for length in self.lengthList: + transactionLength[length] = transactionLength.get(length, 0) + transactionLength[length] += 1 + return {k: v for k, v in sorted(transactionLength.items(), key=lambda x: x[0])}
+ + +
+[docs] + def save(self, data: dict, outputFile: str) -> None: + """ + store data into outputFile + :param data: input data + :type data: dict + :param outputFile: output file name or path to store + :type outputFile: str + :return: None + """ + with open(outputFile, 'w') as f: + for key, value in data.items(): + f.write(f'{key}\t{value}\n')
+ + +
+[docs] + def printStats(self) -> None: + print(f'Database size (total no of transactions) : {self.getDatabaseSize()}') + print(f'Number of items : {self.getNumberOfItems()}') + print(f'Minimum Transaction Size : {self.getMinimumTransactionLength()}') + print(f'Average Transaction Size : {self.getAverageTransactionLength()}') + print(f'Maximum Transaction Size : {self.getMaximumTransactionLength()}') + print(f'Standard Deviation Transaction Size : {self.getStandardDeviationTransactionLength()}') + print(f'Variance in Transaction Sizes : {self.getVarianceTransactionLength()}') + print(f'Sparsity : {self.getSparsity()}')
+ + +
+[docs] + def plotGraphs(self) -> None: + # itemFrequencies = self.getFrequenciesInRange() + transactionLength = self.getTransanctionalLengthDistribution() + plt.plotLineGraphFromDictionary(self.itemFrequencies, 100, 0, 'Frequency', 'No of items', 'frequency') + plt.plotLineGraphFromDictionary(transactionLength, 100, 0, 'transaction length', 'transaction length', 'frequency')
+
+ + + +if __name__ == '__main__': + obj = MultipleTimeSeriesFuzzyDatabaseStats(sys.argv[1]) + obj.run() + obj.printStats() + obj.plotGraphs() +
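+# A minimal usage sketch of this module (the file name 'mtsSample.txt' is
+# illustrative, not part of the library):
+#
+#     from PAMI.extras.dbStats import MultipleTimeSeriesFuzzyDatabaseStats as db
+#
+#     obj = db.MultipleTimeSeriesFuzzyDatabaseStats('mtsSample.txt', sep='\t')
+#     obj.run()
+#     obj.printStats()
+#     obj.plotGraphs()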
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/dbStats/SequentialDatabase.html b/sphinx/_build/html/_modules/PAMI/extras/dbStats/SequentialDatabase.html new file mode 100644 index 000000000..b09daa8e9 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/dbStats/SequentialDatabase.html @@ -0,0 +1,601 @@ + + + + + + PAMI.extras.dbStats.SequentialDatabase — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.dbStats.SequentialDatabase

+# SequentialDatabase is to get the stats of a database, such as the average, minimum and maximum, and so on
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#             from PAMI.extras.dbStats import SequentialDatabase as db
+#
+#             obj = db.SequentialDatabase(iFile, "\t")
+#
+#             obj.save(oFile)
+#
+#             obj.run()
+#
+#             obj.printStats()
+#
+
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+import statistics
+import validators
+from urllib.request import urlopen
+import PAMI.extras.graph.plotLineGraphFromDictionary as plt
+import sys
+from typing import List, Dict, Tuple, Set, Union, Any, Generator
+
+
+
+[docs] +class SequentialDatabase(): + """ + SequentialDatabase is to get the stats of a database, such as the average, minimum and maximum, and so on. + + :Attributes: + + :param inputFile: file : + input file path + :param sep: str + separator in file. Default is tab space. + + :Methods: + + readDatabase(): + read sequential database from input file and store into database and size of each sequence and subsequences. + getDatabaseSize(self): + get the size of database + getTotalNumberOfItems(self): + get the number of items in database. + getMinimumSequenceLength(self): + get the minimum sequence length + getAverageSubsequencePerSequenceLength(self): + get the average subsequence length per sequence length. It is sum of all subsequence length divided by sequence length. + getAverageItemPerSubsequenceLength(self): + get the average Item length per subsequence. It is sum of all item length divided by subsequence length. + getMaximumSequenceLength(self): + get the maximum sequence length + getStandardDeviationSequenceLength(self): + get the standard deviation sequence length + getVarianceSequenceLength(self): + get the variance Sequence length + getSequenceSize(self): + get the size of sequence + getMinimumSubsequenceLength(self): + get the minimum subsequence length + getAverageItemPerSequenceLength(self): + get the average item length per sequence. It is sum of all item length divided by sequence length. + getMaximumSubsequenceLength(self): + get the maximum subsequence length + getStandardDeviationSubsequenceLength(self): + get the standard deviation subsequence length + getVarianceSubsequenceLength(self): + get the variance subSequence length + getSortedListOfItemFrequencies(self): + get sorted list of item frequencies + getFrequenciesInRange(self): + get sorted list of item frequencies in some range + getSequencialLengthDistribution(self): + get Sequence length Distribution + getSubsequencialLengthDistribution(self): + get subSequence length distribution + printStats(self): + to print all the stats of the sequence database + plotGraphs(self): + to plot the distribution about items, subsequences in sequence and items in subsequence + + **Importing this algorithm into a python program** + -------------------------------------------------------- + .. code-block:: python + + from PAMI.extras.dbStats import SequentialDatabase as db + + obj = db.SequentialDatabase(iFile, "\t") + + obj.save(oFile) + + obj.run() + + obj.printStats() + + + **Executing the code on terminal:** + ------------------------------------------------- + + .. code-block:: console + + Format: + + (.venv) $ python3 SequentialDatabase.py <inputFile> + + Example Usage: + + (.venv) $ python3 SequentialDatabase.py sampleDB.txt + + + **Sample run of the importing code:** + ---------------------------------------------------- + import PAMI.extras.dbStats.SequentialDatabase as alg + _ap=alg.SequentialDatabase(inputfile,sep) + _ap.readDatabase() + _ap.printStats() + _ap.plotGraphs() + **Credits:** + --------------------- + The complete program was written by Shota Suzuki under the supervision of Professor Rage Uday Kiran. + """ + + def __init__(self, inputFile: str, sep: str='\t') -> None: + """ + :param inputFile: input file name or path + :type inputFile: str + :param sep: separator + :type sep: str + :return: None + """ + self.inputFile = inputFile + self.seqLengthList = [] + self.subSeqLengthList = [] + self.sep = sep + self.database = {} +
+[docs] + def readDatabase(self) -> None: + """ + read sequential database from input file and store into database and size of each sequence and subsequences. + """ + if isinstance(self.inputFile, str): + if validators.url(self.inputFile): + data = urlopen(self.inputFile) + rowNum=0 + for line in data: + line = line.decode("utf-8").strip() + temp = [i.rstrip() for i in line.split('-1')] + temp = [x for x in temp if x] + temp.pop() + seq = [] + self.seqLengthList.append(len(temp)) + self.subSeqLengthList.append([len(i) for i in temp]) + for i in temp: + if len(i) > 1: + tempSorted=list(sorted(set(i.split()))) + seq.append(tempSorted) + else: + seq.append(i) + rowNum+=1 + if seq: + self.database[rowNum]=seq + else: + with open(self.inputFile, 'r') as f: + rowNum = 0 + for line in f: + temp = [i.rstrip(self.sep) for i in line.split('-1')] + temp = [x for x in temp if x] + temp.pop() + seq = [] + self.seqLengthList.append(len(temp)) + subseq=[] + for i in temp: + if len(i) > 1: + tempSorted = list(sorted(set(i.split()))) + subseq.append(len(tempSorted)) + seq.append(tempSorted) + else: + seq.append(i) + subseq.append(len(i)) + if subseq!=[]: + self.subSeqLengthList.append(subseq) + rowNum += 1 + if seq: + self.database[rowNum] = seq
+ + + +
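+ # A sketch of the sequence line format assumed above (the sample line is
+ # illustrative): "a b -1 c -1 -2" splits on '-1' into itemset fields; the
+ # trailing '-2' terminator field is popped, multi-item fields are
+ # de-duplicated and sorted, and the sequence is stored as [['a', 'b'], ['c']].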
+[docs] + def getDatabaseSize(self) -> int: + """ + get the size of database + :return: dataset size + :rtype: int + """ + return len(self.database)
+ + +
+[docs] + def getTotalNumberOfItems(self) -> int: + """ + get the number of items in database. + :return: number of items + :rtype: int + """ + return len(self.getSortedListOfItemFrequencies())
+ + +
+[docs] + def getMinimumSequenceLength(self) -> int: + """ + get the minimum sequence length + :return: minimum sequence length + :rtype: int + """ + return min(self.seqLengthList)
+ + +
+[docs] + def getAverageSubsequencePerSequenceLength(self) -> float: + """ + get the average subsequence length per sequence length. It is sum of all subsequence length divided by sequence length. + :return: average subsequence length per sequence length + :rtype: float + """ + totalLength = sum(self.seqLengthList) + return totalLength / len(self.database)
+ + +
+[docs] + def getAverageItemPerSubsequenceLength(self) -> float: + + """ + get the average Item length per subsequence. It is sum of all item length divided by subsequence length. + :return: average Item length per subsequence + :rtype: float + """ + + totalLength = sum(list(map(sum,self.subSeqLengthList))) + return totalLength / sum(self.seqLengthList)
+ + +
+[docs] + def getMaximumSequenceLength(self) -> int: + """ + get the maximum sequence length + :return: maximum sequence length + :rtype: int + """ + return max(self.seqLengthList)
+ + +
+[docs] + def getStandardDeviationSequenceLength(self) -> float: + """ + get the standard deviation sequence length + :return: standard deviation sequence length + :rtype: float + """ + return statistics.pstdev(self.seqLengthList)
+ + +
+[docs] + def getVarianceSequenceLength(self) -> float: + """ + get the variance Sequence length + :return: variance Sequence length + :rtype: float + """ + return statistics.variance(self.seqLengthList)
+ + +
+[docs] + def getSequenceSize(self) -> int: + """ + get the size of sequence + :return: sequences size + :rtype: int + """ + return sum(self.seqLengthList)
+ + +
+[docs] + def getMinimumSubsequenceLength(self) -> int: + """ + get the minimum subsequence length + :return: minimum subsequence length + :rtype: int + """ + return min(list(map(min,self.subSeqLengthList)))
+ + +
+[docs] + def getAverageItemPerSequenceLength(self) -> float: + """ + get the average item length per sequence. It is sum of all item length divided by sequence length. + :return: average item length per sequence + :rtype: float + """ + totalLength = sum(list(map(sum,self.subSeqLengthList))) + return totalLength / len(self.database)
+ + +
+[docs] + def getMaximumSubsequenceLength(self) -> int: + """ + get the maximum subsequence length + :return: maximum subsequence length + :rtype: int + """ + return max(list(map(max,self.subSeqLengthList)))
+ + +
+[docs] + def getStandardDeviationSubsequenceLength(self) -> float: + """ + get the standard deviation subsequence length + :return: standard deviation subsequence length + :rtype: float + """ + allList=[] + for i in self.subSeqLengthList: + allList=allList+i + return statistics.pstdev(allList)
+ + +
+[docs] + def getVarianceSubsequenceLength(self) -> float: + """ + get the variance subSequence length + :return: variance subSequence length + :rtype: float + """ + allList = [] + for i in self.subSeqLengthList: + allList = allList + i + return statistics.variance(allList)
+ + +
+[docs] + def getSortedListOfItemFrequencies(self) -> Dict[str, int]: + """ + get sorted list of item frequencies + :return: item frequencies + :rtype: dict + """ + itemFrequencies = {} + for seq in self.database: + for sub in self.database[seq]: + for item in sub: + itemFrequencies[item] = itemFrequencies.get(item, 0) + itemFrequencies[item] += 1 + return {k: v for k, v in sorted(itemFrequencies.items(), key=lambda x: x[1], reverse=True)}
+ + +
+[docs] + def getFrequenciesInRange(self) -> Dict[int, int]: + """ + get sorted list of item frequencies in some range + :return: item separated by its frequencies + :rtype: dict + """ + fre = self.getSortedListOfItemFrequencies() + rangeFrequencies = {} + maximum = max([i for i in fre.values()]) + values = [int(i * maximum / 6) for i in range(1, 6)] + va = len({key: val for key, val in fre.items() if val > 0 and val < values[0]}) + rangeFrequencies[values[0]] = va + for i in range(1, len(values)): + va = len({key: val for key, val in fre.items() if val < values[i] and val > values[i - 1]}) + rangeFrequencies[values[i]] = va + return rangeFrequencies
+ + +
+[docs] + def getSequencialLengthDistribution(self) -> Dict[int, int]: + """ + get Sequence length Distribution + :return: Sequence length + :rtype: dict + """ + transactionLength = {} + for length in self.seqLengthList: + transactionLength[length] = transactionLength.get(length, 0) + transactionLength[length] += 1 + return {k: v for k, v in sorted(transactionLength.items(), key=lambda x: x[0])}
+ + +
+[docs] + def getSubsequencialLengthDistribution(self) -> Dict[int, int]: + """ + get subSequence length distribution + :return: subSequence length + :rtype: dict + """ + transactionLength = {} + for sublen in self.subSeqLengthList: + for length in sublen: + transactionLength[length] = transactionLength.get(length, 0) + transactionLength[length] += 1 + return {k: v for k, v in sorted(transactionLength.items(), key=lambda x: x[0])}
+ + +
+[docs] + def run(self) -> None: + self.readDatabase()
+ + +
+[docs] + def printStats(self) -> None: + """ + To print all the stats of the sequence database + """ + print(f'Database size (total no of sequence) : {self.getDatabaseSize()}') + print(f'Number of items : {self.getTotalNumberOfItems()}') + print(f'Minimum Sequence Size : {self.getMinimumSequenceLength()}') + print(f'Average Sequence Size : {self.getAverageSubsequencePerSequenceLength()}') + print(f'Maximum Sequence Size : {self.getMaximumSequenceLength()}') + print(f'Standard Deviation Sequence Size : {self.getStandardDeviationSequenceLength()}') + print(f'Variance in Sequence Sizes : {self.getVarianceSequenceLength()}') + print(f'Sequence size (total no of subsequence) : {self.getSequenceSize()}') + print(f'Minimum subSequence Size : {self.getMinimumSubsequenceLength()}') + print(f'Average subSequence Size : {self.getAverageItemPerSubsequenceLength()}') + print(f'Maximum subSequence Size : {self.getMaximumSubsequenceLength()}') + print(f'Standard Deviation subSequence Size : {self.getStandardDeviationSubsequenceLength()}') + print(f'Variance in subSequence Sizes : {self.getVarianceSubsequenceLength()}')
+ + +
+[docs] + def plotGraphs(self) -> None: + """ + To plot the distribution about items, subsequences in sequence and items in subsequence + """ + itemFrequencies = self.getFrequenciesInRange() + seqLen = self.getSequencialLengthDistribution() + subLen=self.getSubsequencialLengthDistribution() + plt.plotLineGraphFromDictionary(itemFrequencies, 100, 'Frequency', 'No of items', 'frequency') + plt.plotLineGraphFromDictionary(seqLen, 100, 'sequence length', 'sequence length', 'frequency') + plt.plotLineGraphFromDictionary(subLen, 100, 'subsequence length', 'subsequence length', 'frequency')
+
+ + +if __name__ == '__main__': + _ap=str() + if len(sys.argv)==3 or len(sys.argv)==2: + if len(sys.argv)==3: + _ap=SequentialDatabase(sys.argv[1],sys.argv[2]) + if len(sys.argv) == 2: + _ap = SequentialDatabase(sys.argv[1]) + _ap.run() + _ap.printStats() + _ap.plotGraphs() + else: + print("Error! The number of input parameters does not match the total number of parameters provided") +
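+# A minimal usage sketch of this module (the file name 'sequenceSample.txt'
+# is illustrative, not part of the library):
+#
+#     from PAMI.extras.dbStats import SequentialDatabase as db
+#
+#     obj = db.SequentialDatabase('sequenceSample.txt', sep='\t')
+#     obj.run()
+#     obj.printStats()
+#     obj.plotGraphs()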
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/dbStats/TemporalDatabase.html b/sphinx/_build/html/_modules/PAMI/extras/dbStats/TemporalDatabase.html new file mode 100644 index 000000000..132cf950f --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/dbStats/TemporalDatabase.html @@ -0,0 +1,642 @@ + + + + + + PAMI.extras.dbStats.TemporalDatabase — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.dbStats.TemporalDatabase

+# TemporalDatabase is a class used to get the stats of a temporal database.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#             from PAMI.extras.dbStats import TemporalDatabase as db
+#
+#             obj = db.TemporalDatabase(iFile, "\t")
+#
+#             obj.save(oFile)
+#
+#             obj.run()
+#
+#             obj.printStats()
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+import sys
+import statistics
+import pandas as pd
+import validators
+import numpy as np
+from urllib.request import urlopen
+from typing import Dict, Union
+import PAMI.extras.graph.plotLineGraphFromDictionary as plt
+
+
+
+[docs] +class TemporalDatabase: + """ + :Description: TemporalDatabase is a class to get the stats of a temporal database. + + :Attributes: + + :param inputFile : file + input file path + + :param sep : str + separator in file. Default is tab space. + + :Methods: + + run() + execute readDatabase function + readDatabase() + read database from input file + getDatabaseSize() + get the size of database + getMinimumTransactionLength() + get the minimum transaction length + getAverageTransactionLength() + get the average transaction length. It is sum of all transaction length divided by database length. + getMaximumTransactionLength() + get the maximum transaction length + getStandardDeviationTransactionLength() + get the standard deviation of transaction length + getSortedListOfItemFrequencies() + get sorted list of item frequencies + getSortedListOfTransactionLength() + get sorted list of transaction length + save(data, outputFile) + store data into outputFile + getMinimumPeriod() + get the minimum period + getAveragePeriod() + get the average period + getMaximumPeriod() + get the maximum period + getStandardDeviationPeriod() + get the standard deviation period + getNumberOfTransactionsPerTimestamp() + get number of transactions per time stamp. This time stamp range is 1 to max period. + + **Importing this algorithm into a python program** + -------------------------------------------------------- + .. code-block:: python + + from PAMI.extras.dbStats import TemporalDatabase as db + + obj = db.TemporalDatabase(iFile, "\t") + + obj.save(oFile) + + obj.run() + + obj.printStats() + """ + + def __init__(self, inputFile: Union[str, pd.DataFrame], sep: str = '\t') -> None: + """ + :param inputFile: input file name or path + :type inputFile: str + :param sep: separator + :type sep: str + :return: None + """ + self.inputFile = inputFile + self.database = {} + self.lengthList = [] + self.timeStampCount = {} + self.periodList = [] + self.sep = sep + self.periods = {} +
+[docs] + def run(self) -> None: + self.readDatabase()
+ + +
+[docs] + def readDatabase(self) -> None: + """ + read database from input file and store into database and size of each transaction. + And store the period between transactions as list + """ + numberOfTransaction = 0 + if isinstance(self.inputFile, pd.DataFrame): + if self.inputFile.empty: + print("input dataframe is empty") + i = self.inputFile.columns.values.tolist() + if 'TS' in i and 'Transactions' in i: + self.database = self.inputFile.set_index('TS').T.to_dict(orient='records')[0] + if 'TS' in i and 'Patterns' in i: + self.database = self.inputFile.set_index('TS').T.to_dict(orient='records')[0] + self.timeStampCount = self.inputFile.groupby('TS').count().T.to_dict(orient='records')[0] + + if isinstance(self.inputFile, str): + if validators.url(self.inputFile): + data = urlopen(self.inputFile) + for line in data: + numberOfTransaction += 1 + line = line.decode("utf-8").strip() + temp = [i.rstrip() for i in line.split(self.sep)] + temp = [x for x in temp if x] + self.database[numberOfTransaction] = temp[1:] + self.timeStampCount[int(temp[0])] = self.timeStampCount.get(int(temp[0]), 0) + self.timeStampCount[int(temp[0])] += 1 + else: + try: + with open(self.inputFile, 'r', encoding='utf-8') as f: + for line in f: + numberOfTransaction += 1 + line = line.strip() + temp = [i.rstrip() for i in line.split(self.sep)] + temp = [x for x in temp if x] + if len(temp) > 0: + self.database[numberOfTransaction] = temp[1:] + self.timeStampCount[int(temp[0])] = self.timeStampCount.get(int(temp[0]), 0) + self.timeStampCount[int(temp[0])] += 1 + except IOError: + print("File Not Found") + quit() + self.lengthList = [len(s) for s in self.database.values()] + timeStampList = sorted(list(self.database.keys())) + preTimeStamp = 0 + for ts in timeStampList: + self.periodList.append(int(ts) - preTimeStamp) + preTimeStamp = ts + + for x, y in self.database.items(): + for i in y: + if i not in self.periods: + self.periods[i] = [x, x] + else: + self.periods[i][0] = max(self.periods[i][0], x - self.periods[i][1]) + self.periods[i][1] = x + for key in self.periods: + self.periods[key][0] = max(self.periods[key][0], abs(len(self.database) - self.periods[key][1])) + self.periods = {k: v[0] for k, v in self.periods.items()}
+ + +
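+ # A worked example of the periodicity bookkeeping above (the numbers are
+ # illustrative): in a database of 5 transactions where item 'a' occurs in
+ # transactions 1 and 4, the running pair goes [1, 1] -> [3, 4] (gap
+ # 4 - 1 = 3), and the final pass compares the tail gap |5 - 4| = 1,
+ # leaving periods['a'] = 3, the largest gap between occurrences.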
+[docs] + def getDatabaseSize(self) -> int: + """ + get the size of database + :return: dataset size + :rtype: int + """ + return len(self.database)
+ + +
+[docs] + def getMinimumTransactionLength(self) -> int: + """ + get the minimum transaction length + :return: minimum transaction length + :rtype: int + """ + return min(self.lengthList)
+ + +
+[docs] + def getAverageTransactionLength(self) -> float: + """ + get the average transaction length. It is sum of all transaction length divided by database length. + :return: average transaction length + :rtype: float + """ + totalLength = sum(self.lengthList) + return totalLength / len(self.database)
+ + +
+[docs] + def getMaximumTransactionLength(self) -> int: + """ + get the maximum transaction length + :return: maximum transaction length + :rtype: int + """ + return max(self.lengthList)
+ + +
+[docs] + def getStandardDeviationTransactionLength(self) -> float: + """ + get the standard deviation transaction length + :return: standard deviation transaction length + :rtype: float + """ + return statistics.pstdev(self.lengthList)
+ + +
+[docs] + def getVarianceTransactionLength(self) -> float: + """ + get the variance transaction length + :return: variance transaction length + :rtype: float + """ + return statistics.variance(self.lengthList)
+ + +
+[docs] + def convertDataIntoMatrix(self) -> np.ndarray: + singleItems = self.getSortedListOfItemFrequencies() + itemsets = {} + for tid in self.database: + for item in singleItems: + if item in itemsets: + if item in self.database[tid]: + itemsets[item].append(1) + else: + itemsets[item].append(0) + else: + if item in self.database[tid]: + itemsets[item] = [1] + else: + itemsets[item] = [0] + data = list(itemsets.values()) + an_array = np.array(data) + return an_array
+ + +
+[docs] + def getSparsity(self) -> float: + """ + get the sparsity of database. sparsity is percentage of 0 of database. + :return: database sparsity + :rtype: float + """ + big_array = self.convertDataIntoMatrix() + n_zeros = np.count_nonzero(big_array == 0) + return (n_zeros / big_array.size)
+ + +
+[docs] + def getDensity(self) -> float: + """ + get the density of database. density is the fraction of non-zero entries of the database. + :return: database density + :rtype: float + """ + big_array = self.convertDataIntoMatrix() + n_ones = np.count_nonzero(big_array == 1) + return (n_ones / big_array.size)
+ + +
+[docs] + def getTotalNumberOfItems(self) -> int: + """ + get the number of items in database. + :return: number of items + :rtype: int + """ + return len(self.getSortedListOfItemFrequencies())
+ + +
+[docs] + def getSortedListOfItemFrequencies(self) -> Dict[str, int]: + """ + get sorted list of item frequencies + :return: item frequencies + :rtype: dict + """ + itemFrequencies = {} + for tid in self.database: + for item in self.database[tid]: + itemFrequencies[item] = itemFrequencies.get(item, 0) + itemFrequencies[item] += 1 + return {k: v for k, v in sorted(itemFrequencies.items(), key=lambda x: x[1], reverse=True)}
+ + +
+[docs] + def getFrequenciesInRange(self) -> Dict[int, int]: + """ + get the number of items whose frequency falls in each of six equal-width frequency ranges + :return: mapping from the upper bound of each range to the number of items in it + :rtype: dict + """ + fre = self.getSortedListOfItemFrequencies() + rangeFrequencies = {} + maximum = max([i for i in fre.values()]) + values = [int(i * maximum / 6) for i in range(1, 6)] + va = len({key: val for key, val in fre.items() if val > 0 and val < values[0]}) + rangeFrequencies[values[0]] = va + for i in range(1, len(values)): + va = len({key: val for key, val in fre.items() if val < values[i] and val > values[i - 1]}) + rangeFrequencies[values[i]] = va + return rangeFrequencies
+ + +
+[docs] + def getPeriodsInRange(self) -> Dict[int, int]: + """ + get the number of items whose periodicity falls in each of six equal-width period ranges + :return: mapping from the upper bound of each range to the number of items in it + :rtype: dict + """ + fre = {k: v for k, v in sorted(self.periods.items(), key=lambda x: x[1])} + rangePeriods = {} + maximum = max([i for i in fre.values()]) + values = [int(i * maximum / 6) for i in range(1, 6)] + va = len({key: val for key, val in fre.items() if val > 0 and val < values[0]}) + rangePeriods[values[0]] = va + for i in range(1, len(values)): + va = len({key: val for key, val in fre.items() if val < values[i] and val > values[i - 1]}) + rangePeriods[values[i]] = va + return rangePeriods
+ + +
+[docs] + def getTransanctionalLengthDistribution(self) -> Dict[int, int]: + """ + get transaction length + :return: transactional length + :rtype: dict + """ + transactionLength = {} + for length in self.lengthList: + transactionLength[length] = transactionLength.get(length, 0) + transactionLength[length] += 1 + return {k: v for k, v in sorted(transactionLength.items(), key=lambda x: x[0])}
+ + +
+[docs] + def save(self, data: dict, outputFile: str) -> None: + """ + store data into outputFile + :param data: input data + :type data: dict + :param outputFile: output file name or path to store + :type outputFile: str + :return: None + """ + with open(outputFile, 'w') as f: + for key, value in data.items(): + f.write(f'{key}\t{value}\n')
+ + +
+[docs] + def getMinimumInterArrivalPeriod(self) -> int: + """ + get the minimum inter arrival period + :return: minimum inter arrival period + :rtype: int + """ + return min(self.periodList)
+ + +
+[docs] + def getAverageInterArrivalPeriod(self) -> float: + """ + get the average inter arrival period. It is sum of all period divided by number of period. + :return: average inter arrival period + :rtype: float + """ + totalPeriod = sum(self.periodList) + return totalPeriod / len(self.periodList)
+ + +
+[docs] + def getMaximumInterArrivalPeriod(self) -> int: + """ + get the maximum inter arrival period + :return: maximum inter arrival period + :rtype: int + """ + return max(self.periodList)
+ + +
+[docs] + def getMinimumPeriodOfItem(self) -> int: + """ + get the minimum period of the item + :return: minimum period + :rtype: int + """ + return min([i for i in self.periods.values()])
+ + +
+[docs] + def getAveragePeriodOfItem(self) -> float: + """ + get the average period of the item + :return: average period + :rtype: float + """ + return sum([i for i in self.periods.values()]) / len(self.periods)
+ + +
+[docs] + def getMaximumPeriodOfItem(self) -> int: + """ + get the maximum period of the item + :return: maximum period + :rtype: int + """ + return max([i for i in self.periods.values()])
+ + +
+[docs] + def getStandardDeviationPeriod(self) -> float: + """ + get the standard deviation period + :return: standard deviation period + :rtype: float + """ + return statistics.pstdev(self.periodList)
+ + +
+[docs] + def getNumberOfTransactionsPerTimestamp(self) -> Dict[int, int]: + """ + get number of transactions per time stamp + :return: number of transactions per time stamp as dict + :rtype: dict + """ + maxTS = max(list(self.timeStampCount.keys())) + return {ts: self.timeStampCount.get(ts, 0) for ts in range(1, maxTS + 1)}
+ + +
+[docs]
+    def printStats(self) -> None:
+        print(f'Database size : {self.getDatabaseSize()}')
+        print(f'Number of items : {self.getTotalNumberOfItems()}')
+        print(f'Minimum Transaction Size : {self.getMinimumTransactionLength()}')
+        print(f'Average Transaction Size : {self.getAverageTransactionLength()}')
+        print(f'Maximum Transaction Size : {self.getMaximumTransactionLength()}')
+        print(f'Minimum Inter Arrival Period : {self.getMinimumInterArrivalPeriod()}')
+        print(f'Average Inter Arrival Period : {self.getAverageInterArrivalPeriod()}')
+        print(f'Maximum Inter Arrival Period : {self.getMaximumInterArrivalPeriod()}')
+        print(f'Minimum periodicity : {self.getMinimumPeriodOfItem()}')
+        print(f'Average periodicity : {self.getAveragePeriodOfItem()}')
+        print(f'Maximum periodicity : {self.getMaximumPeriodOfItem()}')
+        print(f'Standard Deviation Transaction Size : {self.getStandardDeviationTransactionLength()}')
+        print(f'Variance : {self.getVarianceTransactionLength()}')
+        print(f'Sparsity : {self.getSparsity()}')
+ + +
+[docs] + def plotGraphs(self) -> None: + itemFrequencies = self.getFrequenciesInRange() + transactionLength = self.getTransanctionalLengthDistribution() + plt.plotLineGraphFromDictionary(itemFrequencies, 100, 0, 'Frequency', 'no of items', 'frequency') + plt.plotLineGraphFromDictionary(transactionLength, 100, 0, 'transaction length', 'transaction length', + 'frequency')
+
+
+
+
+if __name__ == '__main__':
+    data = {'tid': [1, 2, 3, 4, 5, 6, 7],
+            'Transactions': [['a', 'd', 'e'], ['b', 'a', 'f', 'g', 'h'], ['b', 'a', 'd', 'f'], ['b', 'a', 'c'],
+                             ['a', 'd', 'g', 'k'],
+                             ['b', 'd', 'g', 'c', 'i'], ['b', 'd', 'g', 'e', 'j']]}
+
+    # data = pd.DataFrame.from_dict('temporal_T10I4D100K.csv')
+    import PAMI.extras.graph.plotLineGraphFromDictionary as plt
+
+    if len(sys.argv) >= 3:
+        obj = TemporalDatabase(sys.argv[1], sys.argv[2])
+        obj.run()
+        obj.printStats()
+        obj.plotGraphs()
+    obj1 = TemporalDatabase(pd.DataFrame(data))
+    obj1.run()
+    if obj1.getDatabaseSize() > 0:
+        obj1.printStats()
+        obj1.plotGraphs()
+    else:
+        print("No data found in the database.")
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/dbStats/TransactionalDatabase.html b/sphinx/_build/html/_modules/PAMI/extras/dbStats/TransactionalDatabase.html new file mode 100644 index 000000000..41a90ccb4 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/dbStats/TransactionalDatabase.html @@ -0,0 +1,515 @@ + + + + + + PAMI.extras.dbStats.TransactionalDatabase — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.dbStats.TransactionalDatabase

+# Transactional Database is a class used to get stats of database.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#             from PAMI.extras.dbStats import TransactionalDatabase as db
+#
+#             obj = db.TransactionalDatabase(iFile, "\t")
+#
+#             obj.save(oFile)
+#
+#             obj.run()
+#
+#             obj.printStats()
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+import sys
+import statistics
+import pandas as pd
+import validators
+import numpy as np
+from urllib.request import urlopen
+from typing import List, Dict, Tuple, Set, Union, Any, Generator
+import PAMI.extras.graph.plotLineGraphFromDictionary as plt
+
+
+
+[docs]
+class TransactionalDatabase:
+    """
+    :Description: TransactionalDatabase is a class to get stats of a transactional database.
+
+    :Attributes:
+
+        :param inputFile: file :
+               input file path
+        :param sep: str
+               separator in file. Default is tab space.
+
+    :Methods:
+
+        run()
+            execute readDatabase function
+        readDatabase()
+            read database from input file
+        getDatabaseSize()
+            get the size of database
+        getMinimumTransactionLength()
+            get the minimum transaction length
+        getAverageTransactionLength()
+            get the average transaction length. It is the sum of all transaction lengths divided by database length.
+        getMaximumTransactionLength()
+            get the maximum transaction length
+        getStandardDeviationTransactionLength()
+            get the standard deviation of transaction length
+        getVarianceTransactionLength()
+            get the variance of transaction length
+        getSparsity()
+            get the sparsity of database
+        getSortedListOfItemFrequencies()
+            get sorted list of item frequencies
+        getTransanctionalLengthDistribution()
+            get sorted distribution of transaction lengths
+        save(data, outputFile)
+            store data into outputFile
+
+    **Importing this algorithm into a python program**
+    --------------------------------------------------------
+    .. code-block:: python
+
+            from PAMI.extras.dbStats import TransactionalDatabase as db
+
+            obj = db.TransactionalDatabase(iFile, "\t")
+
+            obj.save(oFile)
+
+            obj.run()
+
+            obj.printStats()
+
+    """
+
+    def __init__(self, inputFile: Union[str, pd.DataFrame], sep: str = '\t') -> None:
+        """
+        :param inputFile: input file name or path
+        :type inputFile: str
+        :param sep: separator
+        :type sep: str
+        :return: None
+        """
+        self.inputFile = inputFile
+        self.lengthList = []
+        self.sep = sep
+        self.database = {}
+        self.itemFrequencies = {}
+[docs] + def run(self) -> None: + self.readDatabase()
+ + +
+[docs]
+    def readDatabase(self) -> None:
+        """
+        Read the database from the input file and store each transaction, together with its size.
+        """
+        numberOfTransaction = 0
+        if isinstance(self.inputFile, pd.DataFrame):
+            if self.inputFile.empty:
+                print("its empty..")
+            i = self.inputFile.columns.values.tolist()
+            if 'tid' in i and 'Transactions' in i:
+                self.database = self.inputFile.set_index('tid').T.to_dict(orient='records')[0]
+            if 'tid' in i and 'Patterns' in i:
+                self.database = self.inputFile.set_index('tid').T.to_dict(orient='records')[0]
+        if isinstance(self.inputFile, str):
+            if validators.url(self.inputFile):
+                data = urlopen(self.inputFile)
+                for line in data:
+                    numberOfTransaction += 1
+                    line = line.decode("utf-8").strip()
+                    temp = [i.rstrip() for i in line.split(self.sep)]
+                    temp = [x for x in temp if x]
+                    self.database[numberOfTransaction] = temp
+            else:
+                try:
+                    with open(self.inputFile, 'r', encoding='utf-8') as f:
+                        for line in f:
+                            numberOfTransaction += 1
+                            line = line.strip()
+                            temp = [i.rstrip() for i in line.split(self.sep)]
+                            temp = [x for x in temp if x]
+                            self.database[numberOfTransaction] = temp
+                except IOError:
+                    print("File Not Found")
+                    quit()
+        self.lengthList = [len(s) for s in self.database.values()]
+ + +
+[docs] + def getDatabaseSize(self) -> int: + """ + get the size of database + :return: dataset size + :rtype: int + """ + return len(self.database)
+ + +
+[docs] + def getTotalNumberOfItems(self) -> int: + """ + get the number of items in database. + :return: number of items + :rtype: int + """ + return len(self.getSortedListOfItemFrequencies())
+ + +
+[docs] + def getMinimumTransactionLength(self) -> int: + """ + get the minimum transaction length + :return: minimum transaction length + :rtype: int + """ + return min(self.lengthList)
+ + +
+[docs] + def getAverageTransactionLength(self) -> float: + """ + get the average transaction length. It is sum of all transaction length divided by database length. + :return: average transaction length + :rtype: float + """ + totalLength = sum(self.lengthList) + return totalLength / len(self.database)
+ + +
+[docs] + def getMaximumTransactionLength(self) -> int: + """ + get the maximum transaction length + :return: maximum transaction length + :rtype: int + """ + return max(self.lengthList)
+ + +
+[docs] + def getStandardDeviationTransactionLength(self) -> float: + """ + get the standard deviation transaction length + :return: standard deviation transaction length + :rtype: float + """ + return statistics.pstdev(self.lengthList)
+ + +
+[docs] + def getVarianceTransactionLength(self) -> float: + """ + get the variance transaction length + :return: variance transaction length + :rtype: float + """ + return statistics.variance(self.lengthList)
+ + +
+[docs] + def getNumberOfItems(self) -> int: + """ + get the number of items in database. + :return: number of items + :rtype: int + """ + return len(self.getSortedListOfItemFrequencies())
+ + +
+[docs]
+    def convertDataIntoMatrix(self) -> np.ndarray:
+        """
+        Convert the database into a binary matrix whose rows are items and whose columns are transactions.
+        :return: binary item-presence matrix
+        :rtype: np.ndarray
+        """
+        singleItems = self.getSortedListOfItemFrequencies()
+        itemsets = {item: [] for item in singleItems}
+        for tid in self.database:
+            for item in singleItems:
+                itemsets[item].append(1 if item in self.database[tid] else 0)
+        return np.array(list(itemsets.values()))
+ + +
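A small illustration of the matrix layout produced by convertDataIntoMatrix() above (hypothetical transactions; rows are items in descending frequency order, columns are transactions):

import numpy as np

database = {1: ['a', 'b'], 2: ['b']}   # hypothetical transactions
items = ['b', 'a']                     # sorted by descending frequency
matrix = np.array([[1 if it in database[tid] else 0 for tid in database]
                   for it in items])
# matrix -> [[1, 1],    row 'b'
#            [1, 0]]    row 'a'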
+[docs]
+    def getSparsity(self) -> float:
+        """
+        Get the sparsity of the database. Sparsity is the fraction of zero entries in the database matrix.
+        :return: database sparsity
+        :rtype: float
+        """
+        big_array = self.convertDataIntoMatrix()
+        n_zeros = np.count_nonzero(big_array == 0)
+        return n_zeros / big_array.size
+ + +
+[docs]
+    def getDensity(self) -> float:
+        """
+        Get the density of the database. Density is the fraction of non-zero entries in the database matrix.
+        :return: database density
+        :rtype: float
+        """
+        big_array = self.convertDataIntoMatrix()
+        n_nonzero = np.count_nonzero(big_array != 0)
+        return n_nonzero / big_array.size
+ + +
+[docs] + def getSortedListOfItemFrequencies(self) -> dict: + """ + get sorted list of item frequencies + :return: item frequencies + :rtype: dict + """ + itemFrequencies = {} + for tid in self.database: + for item in self.database[tid]: + itemFrequencies[item] = itemFrequencies.get(item, 0) + itemFrequencies[item] += 1 + self.itemFrequencies = {k: v for k, v in sorted(itemFrequencies.items(), key=lambda x: x[1], reverse=True)} + return self.itemFrequencies
+ + +
+[docs]
+    def getFrequenciesInRange(self) -> dict:
+        """
+        Bin the item frequencies into five ranges between 0 and the maximum frequency.
+        :return: mapping from the number of items in a bin to that bin's upper frequency bound
+        :rtype: dict
+        """
+        fre = self.getSortedListOfItemFrequencies()
+        rangeFrequencies = {}
+        maximum = max(fre.values())
+        values = [int(i * maximum / 6) for i in range(1, 6)]
+        # inclusive upper bounds so frequencies that fall exactly on a boundary are counted
+        va = len({key: val for key, val in fre.items() if 0 < val <= values[0]})
+        rangeFrequencies[va] = values[0]
+        for i in range(1, len(values)):
+            va = len({key: val for key, val in fre.items() if values[i - 1] < val <= values[i]})
+            rangeFrequencies[va] = values[i]
+        return rangeFrequencies
+ + +
+[docs]
+    def getTransanctionalLengthDistribution(self) -> dict:
+        """
+        Get the distribution of transaction lengths.
+        :return: a dictionary with transaction lengths as keys and the number of transactions of that length as values
+        :rtype: dict
+        """
+        transactionLength = {}
+        for length in self.lengthList:
+            transactionLength[length] = transactionLength.get(length, 0) + 1
+        return {k: v for k, v in sorted(transactionLength.items(), key=lambda x: x[0])}
+ + +
+[docs] + def save(self, data: dict, outputFile: str) -> None: + """ + store data into outputFile + :param data: input data + :type data: dict + :param outputFile: output file name or path to store + :type outputFile: str + :return: None + """ + with open(outputFile, 'w') as f: + for key, value in data.items(): + f.write(f'{key}\t{value}\n')
+ + +
+[docs] + def printStats(self) -> None: + print(f'Database size (total no of transactions) : {self.getDatabaseSize()}') + print(f'Number of items : {self.getNumberOfItems()}') + print(f'Minimum Transaction Size : {self.getMinimumTransactionLength()}') + print(f'Average Transaction Size : {self.getAverageTransactionLength()}') + print(f'Maximum Transaction Size : {self.getMaximumTransactionLength()}') + print(f'Standard Deviation Transaction Size : {self.getStandardDeviationTransactionLength()}') + print(f'Variance in Transaction Sizes : {self.getVarianceTransactionLength()}') + print(f'Sparsity : {self.getSparsity()}')
+ + +
+[docs]
+    def plotGraphs(self) -> None:
+        # fetch the frequencies directly so the plot works even if printStats() was never called
+        itemFrequencies = self.getSortedListOfItemFrequencies()
+        transactionLength = self.getTransanctionalLengthDistribution()
+        plt.plotLineGraphFromDictionary(itemFrequencies, 100, 0, 'Frequency', 'No of items', 'frequency')
+        plt.plotLineGraphFromDictionary(transactionLength, 100, 0, 'transaction length', 'transaction length', 'frequency')
+
+
+
+
+if __name__ == '__main__':
+    data = {'tid': [1, 2, 3, 4, 5, 6, 7],
+            'Transactions': [['a', 'd', 'e'], ['b', 'a', 'f', 'g', 'h'], ['b', 'a', 'd', 'f'], ['b', 'a', 'c'],
+                             ['a', 'd', 'g', 'k'],
+                             ['b', 'd', 'g', 'c', 'i'], ['b', 'd', 'g', 'e', 'j']]}
+
+    # data = pd.DataFrame.from_dict('transactional_T10I4D100K.csv')
+    import PAMI.extras.graph.plotLineGraphFromDictionary as plt
+    import pandas as pd
+
+    if len(sys.argv) >= 3:
+        obj = TransactionalDatabase(sys.argv[1], sys.argv[2])
+    else:
+        obj = TransactionalDatabase(pd.DataFrame(data))
+    obj.run()
+    obj.printStats()
+    obj.plotGraphs()
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/dbStats/UncertainTemporalDatabase.html b/sphinx/_build/html/_modules/PAMI/extras/dbStats/UncertainTemporalDatabase.html new file mode 100644 index 000000000..eefb24b2d --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/dbStats/UncertainTemporalDatabase.html @@ -0,0 +1,587 @@ + + + + + + PAMI.extras.dbStats.UncertainTemporalDatabase — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.dbStats.UncertainTemporalDatabase

+# UncertainTemporalDatabase is a class used to get stats of database.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#             from PAMI.extras.dbStats import UncertainTemporalDatabase as db
+#
+#             obj = db.UncertainTemporalDatabase(iFile, "\t")
+#
+#             obj.save(oFile)
+#
+#             obj.run()
+#
+#             obj.printStats()
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+import sys
+import statistics
+import pandas as pd
+import validators
+import numpy as np
+from urllib.request import urlopen
+from typing import List, Dict, Tuple, Set, Union, Any, Generator
+import PAMI.extras.graph.plotLineGraphFromDictionary as plt
+
+
+
+[docs] +class UncertainTemporalDatabase: + """ + :Description: UncertainTemporalDatabaseStats is class to get stats of database. + + :Attributes: + + :param inputFile : file + input file path + + :param sep : str + separator in file. Default is tab space. + + :Methods: + + run() + execute readDatabase function + readDatabase() + read database from input file + getDatabaseSize() + get the size of database + getMinimumTransactionLength() + get the minimum transaction length + getAverageTransactionLength() + get the average transaction length. It is sum of all transaction length divided by database length. + getMaximumTransactionLength() + get the maximum transaction length + getStandardDeviationTransactionLength() + get the standard deviation of transaction length + getSortedListOfItemFrequencies() + get sorted list of item frequencies + getSortedListOfTransactionLength() + get sorted list of transaction length + save(data, outputFile) + store data into outputFile + getMinimumPeriod() + get the minimum period + getAveragePeriod() + get the average period + getMaximumPeriod() + get the maximum period + getStandardDeviationPeriod() + get the standard deviation period + getNumberOfTransactionsPerTimestamp() + get number of transactions per time stamp. This time stamp range is 1 to max period. + + **Importing this algorithm into a python program** + -------------------------------------------------------- + .. code-block:: python + + from PAMI.extras.dbStats import UncertainTemporalDatabase as db + + obj = db.UncertainTemporalDatabase(iFile, "\t") + + obj.save(oFile) + + obj.run() + + obj.printStats() + + """ + + def __init__(self, inputFile: str, sep: str='\t') -> None: + """ + :param inputFile: input file name or path + :type inputFile: str + :param sep: separator + :type sep: str + :return: None + """ + self.inputFile = inputFile + self.database = {} + self.lengthList = [] + self.timeStampCount = {} + self.periodList = [] + self.sep = sep + +
+[docs] + def run(self) -> None: + self.readDatabase()
+ + +
+[docs]
+    def readDatabase(self) -> None:
+        """
+        Read the database from the input file, store each transaction and its size,
+        and store the periods between consecutive timestamps as a list.
+        """
+        numberOfTransaction = 0
+        if isinstance(self.inputFile, pd.DataFrame):
+            if self.inputFile.empty:
+                print("its empty..")
+            i = self.inputFile.columns.values.tolist()
+            if 'ts' in i and 'Transactions' in i:
+                self.database = self.inputFile.set_index('ts').T.to_dict(orient='records')[0]
+            if 'ts' in i and 'Patterns' in i:
+                self.database = self.inputFile.set_index('ts').T.to_dict(orient='records')[0]
+            self.timeStampCount = self.inputFile.groupby('ts').count().T.to_dict(orient='records')[0]
+
+        if isinstance(self.inputFile, str):
+            if validators.url(self.inputFile):
+                data = urlopen(self.inputFile)
+                for line in data:
+                    numberOfTransaction += 1
+                    line = line.decode("utf-8").strip()
+                    temp1 = line.split(':')
+                    temp = [i.rstrip() for i in temp1[0].split(self.sep)]
+                    self.database[numberOfTransaction] = temp[1:]
+                    self.timeStampCount[int(temp[0])] = self.timeStampCount.get(int(temp[0]), 0) + 1
+            else:
+                try:
+                    with open(self.inputFile, 'r', encoding='utf-8') as f:
+                        for line in f:
+                            numberOfTransaction += 1
+                            line = line.strip()
+                            temp1 = line.split(':')
+                            temp = [i for i in temp1[0].split(self.sep)]
+                            if len(temp) > 0:
+                                self.database[numberOfTransaction] = temp[1:]
+                                self.timeStampCount[int(temp[0])] = self.timeStampCount.get(int(temp[0]), 0) + 1
+                except IOError:
+                    print("File Not Found")
+                    quit()
+        self.lengthList = [len(s) for s in self.database.values()]
+        timeStampList = sorted(list(self.timeStampCount.keys()))  # actual timestamps, not transaction numbers
+        preTimeStamp = 0
+        for ts in timeStampList:
+            self.periodList.append(int(ts) - preTimeStamp)
+            preTimeStamp = ts
+
+[docs] + def getDatabaseSize(self) -> int: + """ + get the size of database + :return: dataset size + :rtype: int + """ + return len(self.database)
+ + +
+[docs] + def getMinimumTransactionLength(self) -> int: + """ + get the minimum transaction length + :return: minimum transaction length + :rtype: int + """ + return min(self.lengthList)
+ + +
+[docs] + def getAverageTransactionLength(self) -> float: + """ + get the average transaction length. It is sum of all transaction length divided by database length. + :return: average transaction length + :rtype: float + """ + totalLength = sum(self.lengthList) + return totalLength / len(self.database)
+ + +
+[docs] + def getMaximumTransactionLength(self) -> int: + """ + get the maximum transaction length + :return: maximum transaction length + :rtype: int + """ + return max(self.lengthList)
+ + +
+[docs] + def getStandardDeviationTransactionLength(self) -> float: + """ + get the standard deviation transaction length + :return: standard deviation transaction length + :rtype: float + """ + return statistics.pstdev(self.lengthList)
+ + +
+[docs] + def getVarianceTransactionLength(self) -> float: + """ + get the variance transaction length + :return: variance transaction length + :rtype: float + """ + return statistics.variance(self.lengthList)
+ + +
+[docs] + def convertDataIntoMatrix(self) -> np.ndarray: + singleItems = self.getSortedListOfItemFrequencies() + itemsets = {} + for tid in self.database: + for item in singleItems: + if item in itemsets: + if item in self.database[tid]: + itemsets[item].append(1) + else: + itemsets[item].append(0) + else: + if item in self.database[tid]: + itemsets[item] = [1] + else: + itemsets[item] = [0] + data = list(itemsets.values()) + an_array = np.array(data) + return an_array
+ + +
+[docs]
+    def getSparsity(self) -> float:
+        """
+        Get the sparsity of the database. Sparsity is the fraction of zero entries in the database matrix.
+        :return: database sparsity
+        :rtype: float
+        """
+        big_array = self.convertDataIntoMatrix()
+        n_zeros = np.count_nonzero(big_array == 0)
+        return n_zeros / big_array.size
+ + +
+[docs]
+    def getDensity(self) -> float:
+        """
+        Get the density of the database. Density is the fraction of non-zero entries in the database matrix.
+        :return: database density
+        :rtype: float
+        """
+        big_array = self.convertDataIntoMatrix()
+        n_ones = np.count_nonzero(big_array == 1)
+        return n_ones / big_array.size
+ + +
+[docs] + def getTotalNumberOfItems(self) -> int: + """ + get the number of items in database. + :return: number of items + :rtype: int + """ + return len(self.getSortedListOfItemFrequencies())
+ + +
+[docs] + def getSortedListOfItemFrequencies(self) -> dict: + """ + get sorted list of item frequencies + :return: item frequencies + :rtype: dict + """ + itemFrequencies = {} + for tid in self.database: + for item in self.database[tid]: + itemFrequencies[item] = itemFrequencies.get(item, 0) + itemFrequencies[item] += 1 + return {k: v for k, v in sorted(itemFrequencies.items(), key=lambda x: x[1], reverse=True)}
+ + +
+[docs]
+    def getFrequenciesInRange(self) -> dict:
+        """
+        Bin the item frequencies into five ranges between 0 and the maximum frequency.
+        :return: mapping from the number of items in a bin to that bin's upper frequency bound
+        :rtype: dict
+        """
+        fre = self.getSortedListOfItemFrequencies()
+        rangeFrequencies = {}
+        maximum = max(fre.values())
+        values = [int(i * maximum / 6) for i in range(1, 6)]
+        # inclusive upper bounds so frequencies that fall exactly on a boundary are counted
+        va = len({key: val for key, val in fre.items() if 0 < val <= values[0]})
+        rangeFrequencies[va] = values[0]
+        for i in range(1, len(values)):
+            va = len({key: val for key, val in fre.items() if values[i - 1] < val <= values[i]})
+            rangeFrequencies[va] = values[i]
+        return rangeFrequencies
+ + +
+[docs]
+    def getTransanctionalLengthDistribution(self) -> dict:
+        """
+        Get the distribution of transaction lengths.
+        :return: mapping from transaction length to the number of transactions of that length
+        :rtype: dict
+        """
+        transactionLength = {}
+        for length in self.lengthList:
+            transactionLength[length] = transactionLength.get(length, 0) + 1
+        return {k: v for k, v in sorted(transactionLength.items(), key=lambda x: x[0])}
+ + +
+[docs] + def save(self, data: dict, outputFile: str) -> None: + """ + store data into outputFile + :param data: input data + :type data: dict + :param outputFile: output file name or path to store + :type outputFile: str + :return: None + """ + with open(outputFile, 'w') as f: + for key, value in data.items(): + f.write(f'{key}\t{value}\n')
+ + +
+[docs] + def getMinimumPeriod(self) -> int: + """ + get the minimum period + :return: minimum period + :rtype: int + """ + return min(self.periodList)
+ + +
+[docs] + def getAveragePeriod(self) -> float: + """ + get the average period. It is sum of all period divided by number of period. + :return: average period + :rtype: float + """ + totalPeriod = sum(self.periodList) + return totalPeriod / len(self.periodList)
+ + +
+[docs] + def getMaximumPeriod(self) -> int: + """ + get the maximum period + :return: maximum period + :rtype: int + """ + return max(self.periodList)
+ + +
+[docs] + def getStandardDeviationPeriod(self) -> float: + """ + get the standard deviation period + :return: standard deviation period + :rtype: float + """ + return statistics.pstdev(self.periodList)
+ + +
+[docs]
+    def getNumberOfTransactionsPerTimestamp(self) -> dict:
+        """
+        Get the number of transactions per timestamp.
+        :return: number of transactions per timestamp as dict
+        :rtype: dict
+        """
+        maxTS = max(list(self.timeStampCount.keys()))
+        return {ts: self.timeStampCount.get(ts, 0) for ts in range(1, maxTS + 1)}
+ + +
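For instance (hypothetical counts, a sketch of the gap-filling behaviour above), timestamps with no transactions are reported explicitly with a count of 0:

timeStampCount = {1: 2, 3: 1}   # hypothetical timestamp -> transaction count
maxTS = max(timeStampCount)     # 3
result = {ts: timeStampCount.get(ts, 0) for ts in range(1, maxTS + 1)}
# result -> {1: 2, 2: 0, 3: 1}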
+[docs] + def printStats(self) -> None: + print(f'Database size : {self.getDatabaseSize()}') + print(f'Number of items : {self.getTotalNumberOfItems()}') + print(f'Minimum Transaction Size : {self.getMinimumTransactionLength()}') + print(f'Average Transaction Size : {self.getAverageTransactionLength()}') + print(f'Maximum Transaction Size : {self.getMaximumTransactionLength()}') + print(f'Minimum period : {self.getMinimumPeriod()}') + print(f'Average period : {self.getAveragePeriod()}') + print(f'Maximum period : {self.getMaximumPeriod()}') + print(f'Standard Deviation Transaction Size : {self.getStandardDeviationTransactionLength()}') + print(f'Variance : {self.getVarianceTransactionLength()}') + print(f'Sparsity : {self.getSparsity()}')
+ + +
+[docs]
+    def plotGraphs(self) -> None:
+        itemFrequencies = self.getFrequenciesInRange()
+        transactionLength = self.getTransanctionalLengthDistribution()
+        # numberOfTransactionPerTimeStamp = self.getNumberOfTransactionsPerTimestamp()
+        # pass start=0 explicitly to match the (dict, end, start, title, xlabel, ylabel) call used elsewhere in this module
+        plt.plotLineGraphFromDictionary(itemFrequencies, 100, 0, 'Frequency', 'no of items', 'frequency')
+        plt.plotLineGraphFromDictionary(transactionLength, 100, 0, 'transaction length', 'transaction length', 'frequency')
+
+
+    # plt.plotLineGraphFromDictionary(numberOfTransactionPerTimeStamp, 100)
+
+
+if __name__ == '__main__':
+    import PAMI.extras.graph.plotLineGraphFromDictionary as plt
+
+    data = {'ts': [1, 1, 3, 4, 5, 6, 7],
+            'Transactions': [['a', 'd', 'e'], ['b', 'a', 'f', 'g', 'h'], ['b', 'a', 'd', 'f'], ['b', 'a', 'c'],
+                             ['a', 'd', 'g', 'k'],
+                             ['b', 'd', 'g', 'c', 'i'], ['b', 'd', 'g', 'e', 'j']]}
+    # data = pd.DataFrame.from_dict(data)  # a DataFrame with 'ts' and 'Transactions' columns also works
+
+    obj = UncertainTemporalDatabase(sys.argv[1], sys.argv[2])
+    obj.run()
+    obj.printStats()
+    obj.plotGraphs()
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/dbStats/UncertainTransactionalDatabase.html b/sphinx/_build/html/_modules/PAMI/extras/dbStats/UncertainTransactionalDatabase.html new file mode 100644 index 000000000..34163acc6 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/dbStats/UncertainTransactionalDatabase.html @@ -0,0 +1,493 @@ + + + + + + PAMI.extras.dbStats.UncertainTransactionalDatabase — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.dbStats.UncertainTransactionalDatabase

+# UncertainTransactionalDatabase is a class used to get stats of database.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#             from PAMI.extras.dbStats import UncertainTransactionalDatabase as db
+#
+#             obj = db.UncertainTransactionalDatabase(iFile, "\t")
+#
+#             obj.save(oFile)
+#
+#             obj.run()
+#
+#             obj.printStats()
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+import sys
+import statistics
+import pandas as pd
+import validators
+import numpy as np
+from urllib.request import urlopen
+from typing import List, Dict, Tuple, Set, Union, Any, Generator
+import PAMI.extras.graph.plotLineGraphFromDictionary as plt
+
+
+[docs] +class UncertainTransactionalDatabase: + """ + :Description: UncertainTransactionalDatabase is class to get stats of database. + + :Attributes: + + inputFile : file + input file path + sep : str + separator in file. Default is tab space. + + :Methods: + + run() + execute readDatabase function + readDatabase() + read database from input file + getDatabaseSize() + get the size of database + getMinimumTransactionLength() + get the minimum transaction length + getAverageTransactionLength() + get the average transaction length. It is sum of all transaction length divided by database length. + getMaximumTransactionLength() + get the maximum transaction length + getStandardDeviationTransactionLength() + get the standard deviation of transaction length + getVarianceTransactionLength() + get the variance of transaction length + getSparsity() + get the sparsity of database + getSortedListOfItemFrequencies() + get sorted list of item frequencies + getSortedListOfTransactionLength() + get sorted list of transaction length + save(data, outputFile) + store data into outputFile + + **Importing this algorithm into a python program** + -------------------------------------------------------- + .. code-block:: python + + from PAMI.extras.dbStats import UncertainTransactionalDatabase as db + + obj = db.UncertainTransactionalDatabase(iFile, "\t") + + obj.save(oFile) + + obj.run() + + obj.printStats() + + """ + + def __init__(self, inputFile: str, sep: str='\t') -> None: + """ + :param inputFile: input file name or path + :type inputFile: str + :param sep: separator + :type sep: str + :return: None + """ + self.inputFile = inputFile + self.lengthList = [] + self.sep = sep + self.database = {} + +
+[docs] + def run(self) -> None: + self.readDatabase()
+ + +
+[docs]
+    def readDatabase(self) -> None:
+        """
+        Read the database from the input file and store each transaction, together with its size.
+        """
+        numberOfTransaction = 0
+        if isinstance(self.inputFile, pd.DataFrame):
+            if self.inputFile.empty:
+                print("its empty..")
+            i = self.inputFile.columns.values.tolist()
+            if 'tid' in i and 'Transactions' in i:
+                self.database = self.inputFile.set_index('tid').T.to_dict(orient='records')[0]
+            if 'tid' in i and 'Patterns' in i:
+                self.database = self.inputFile.set_index('tid').T.to_dict(orient='records')[0]
+        if isinstance(self.inputFile, str):
+            if validators.url(self.inputFile):
+                data = urlopen(self.inputFile)
+                for line in data:
+                    numberOfTransaction += 1
+                    line = line.decode("utf-8").strip()
+                    temp = line.split(':')
+                    temp1 = [i.rstrip() for i in temp[0].split(self.sep)]
+                    temp1 = [x for x in temp1 if x]
+                    self.database[numberOfTransaction] = temp1
+            else:
+                try:
+                    with open(self.inputFile, 'r', encoding='utf-8') as f:
+                        for line in f:
+                            numberOfTransaction += 1
+                            line = line.strip()
+                            temp = line.split(':')
+                            temp1 = [i for i in temp[0].split(self.sep)]
+                            self.database[numberOfTransaction] = temp1
+                except IOError:
+                    print("File Not Found")
+                    quit()
+        self.lengthList = [len(s) for s in self.database.values()]
+ + +
+[docs] + def getDatabaseSize(self) -> int: + """ + get the size of database + :return: dataset size + :rtype: int + """ + return len(self.database)
+ + +
+[docs] + def getTotalNumberOfItems(self) -> int: + """ + get the number of items in database. + :return: number of items + :rtype: int + """ + return len(self.getSortedListOfItemFrequencies())
+ + +
+[docs] + def getMinimumTransactionLength(self) -> int: + """ + get the minimum transaction length + :return: minimum transaction length + :rtype: int + """ + return min(self.lengthList)
+ + +
+[docs] + def getAverageTransactionLength(self) -> float: + """ + get the average transaction length. It is sum of all transaction length divided by database length. + :return: average transaction length + :rtype: float + """ + totalLength = sum(self.lengthList) + return totalLength / len(self.database)
+ + +
+[docs] + def getMaximumTransactionLength(self) -> int: + """ + get the maximum transaction length + :return: maximum transaction length + :rtype: int + """ + return max(self.lengthList)
+ + +
+[docs] + def getStandardDeviationTransactionLength(self) -> float: + """ + get the standard deviation transaction length + :return: standard deviation transaction length + :rtype: float + """ + return statistics.pstdev(self.lengthList)
+ + +
+[docs] + def getVarianceTransactionLength(self) -> float: + """ + get the variance transaction length + :return: variance transaction length + :rtype: float + """ + return statistics.variance(self.lengthList)
+ + +
+[docs] + def getNumberOfItems(self) -> int: + """ + get the number of items in database. + :return: number of items + :rtype: int + """ + return len(self.getSortedListOfItemFrequencies())
+ + +
+[docs]
+    def convertDataIntoMatrix(self) -> np.ndarray:
+        """
+        Convert the database into a binary matrix whose rows are items and whose columns are transactions.
+        :return: binary item-presence matrix
+        :rtype: np.ndarray
+        """
+        singleItems = self.getSortedListOfItemFrequencies()
+        itemsets = {item: [] for item in singleItems}
+        for tid in self.database:
+            for item in singleItems:
+                itemsets[item].append(1 if item in self.database[tid] else 0)
+        return np.array(list(itemsets.values()))
+ + +
+[docs]
+    def getSparsity(self) -> float:
+        """
+        Get the sparsity of the database. Sparsity is the fraction of zero entries in the database matrix.
+        :return: database sparsity
+        :rtype: float
+        """
+        big_array = self.convertDataIntoMatrix()
+        n_zeros = np.count_nonzero(big_array == 0)
+        return n_zeros / big_array.size
+ + +
+[docs]
+    def getDensity(self) -> float:
+        """
+        Get the density of the database. Density is the fraction of non-zero entries in the database matrix.
+        :return: database density
+        :rtype: float
+        """
+        big_array = self.convertDataIntoMatrix()
+        n_nonzero = np.count_nonzero(big_array != 0)
+        return n_nonzero / big_array.size
+ + +
+[docs] + def getSortedListOfItemFrequencies(self) -> dict: + """ + get sorted list of item frequencies + :return: item frequencies + :rtype: dict + """ + itemFrequencies = {} + for tid in self.database: + for item in self.database[tid]: + itemFrequencies[item] = itemFrequencies.get(item, 0) + itemFrequencies[item] += 1 + return {k: v for k, v in sorted(itemFrequencies.items(), key=lambda x: x[1], reverse=True)}
+ + +
+[docs]
+    def getFrequenciesInRange(self) -> dict:
+        """
+        Bin the item frequencies into five ranges between 0 and the maximum frequency.
+        :return: mapping from the number of items in a bin to that bin's upper frequency bound
+        :rtype: dict
+        """
+        fre = self.getSortedListOfItemFrequencies()
+        rangeFrequencies = {}
+        maximum = max(fre.values())
+        values = [int(i * maximum / 6) for i in range(1, 6)]
+        # inclusive upper bounds so frequencies that fall exactly on a boundary are counted
+        va = len({key: val for key, val in fre.items() if 0 < val <= values[0]})
+        rangeFrequencies[va] = values[0]
+        for i in range(1, len(values)):
+            va = len({key: val for key, val in fre.items() if values[i - 1] < val <= values[i]})
+            rangeFrequencies[va] = values[i]
+        return rangeFrequencies
+ + +
+[docs]
+    def getTransanctionalLengthDistribution(self) -> dict:
+        """
+        Get the distribution of transaction lengths.
+        :return: mapping from transaction length to the number of transactions of that length
+        :rtype: dict
+        """
+        transactionLength = {}
+        for length in self.lengthList:
+            transactionLength[length] = transactionLength.get(length, 0) + 1
+        return {k: v for k, v in sorted(transactionLength.items(), key=lambda x: x[0])}
+ + +
+[docs] + def save(self, data: dict, outputFile: str) -> None: + """ + store data into outputFile + :param data: input data + :type data: dict + :param outputFile: output file name or path to store + :type outputFile: str + :return: None + """ + with open(outputFile, 'w') as f: + for key, value in data.items(): + f.write(f'{key}\t{value}\n')
+ + +
+[docs] + def printStats(self) -> None: + print(f'Database size (total no of transactions) : {self.getDatabaseSize()}') + print(f'Number of items : {self.getNumberOfItems()}') + print(f'Minimum Transaction Size : {self.getMinimumTransactionLength()}') + print(f'Average Transaction Size : {self.getAverageTransactionLength()}') + print(f'Maximum Transaction Size : {self.getMaximumTransactionLength()}') + print(f'Standard Deviation Transaction Size : {self.getStandardDeviationTransactionLength()}') + print(f'Variance in Transaction Sizes : {self.getVarianceTransactionLength()}') + print(f'Sparsity : {self.getSparsity()}')
+ + +
+[docs]
+    def plotGraphs(self) -> None:
+        itemFrequencies = self.getFrequenciesInRange()
+        transactionLength = self.getTransanctionalLengthDistribution()
+        # pass start=0 explicitly to match the (dict, end, start, title, xlabel, ylabel) call used elsewhere in this module
+        plt.plotLineGraphFromDictionary(itemFrequencies, 100, 0, 'Frequency', 'No of items', 'frequency')
+        plt.plotLineGraphFromDictionary(transactionLength, 100, 0, 'transaction length', 'transaction length', 'frequency')
+
+
+
+
+if __name__ == '__main__':
+    obj = UncertainTransactionalDatabase(sys.argv[1], sys.argv[2])
+    obj.run()
+    obj.printStats()
+    obj.plotGraphs()
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/dbStats/UtilityDatabase.html b/sphinx/_build/html/_modules/PAMI/extras/dbStats/UtilityDatabase.html new file mode 100644 index 000000000..2dcbe2572 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/dbStats/UtilityDatabase.html @@ -0,0 +1,535 @@ + + + + + + PAMI.extras.dbStats.UtilityDatabase — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.dbStats.UtilityDatabase

+# UtilityDatabase is a code used to get stats of the database.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#             from PAMI.extras.dbStats import UtilityDatabase as db
+#
+#             obj = db.UtilityDatabase(iFile, "\t")
+#
+#             obj.save(oFile)
+#
+#             obj.run()
+#
+#             obj.printStats()
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+import sys
+import statistics
+from urllib.request import urlopen
+import pandas as pd
+from typing import Union
+import PAMI.extras.graph.plotLineGraphFromDictionary as plt
+
+
+[docs] +class UtilityDatabase: + """ + :Description: UtilityDatabase is class to get stats of database. + + :Attributes: + + :param inputFile: file : + input file path + :param sep: str + separator in file. Default is tab space. + + **Importing this algorithm into a python program** + -------------------------------------------------------- + .. code-block:: python + + from PAMI.extras.dbStats import UtilityDatabase as db + + obj = db.UtilityDatabase(iFile, "\t" ) + + obj.save(oFile) + + obj.run() + + obj.printStats() + + """ + + def __init__(self, inputFile: Union[str, pd.DataFrame], sep: str='\t') -> None: + """ + :param inputFile: input file name or path + :type inputFile: str + :param sep: separator in file + :type sep: str or + :return: None + """ + self.inputFile = inputFile + self.database = {} + self.lengthList = [] + self.utility = {} + self.sep = sep + +
+[docs] + def run(self) -> None: + self.readDatabase()
+ + +
+[docs]
+    def creatingItemSets(self) -> None:
+        """
+        Store the complete transactions of the database/input file in the Database variable.
+        """
+        self.Database = []
+        self.utilityValues = []
+        if isinstance(self.inputFile, pd.DataFrame):
+            if self.inputFile.empty:
+                print("its empty..")
+            i = self.inputFile.columns.values.tolist()
+            if 'Transactions' in i:
+                self.Database = self.inputFile['Transactions'].tolist()
+            if 'Patterns' in i:
+                self.Database = self.inputFile['Patterns'].tolist()
+            if 'Utility' in i:
+                self.utilityValues = self.inputFile['Utility'].tolist()
+
+        if isinstance(self.inputFile, str):
+            if self.inputFile.startswith("http://") or self.inputFile.startswith("https://"):
+                data = urlopen(self.inputFile)
+                for line in data:
+                    line = line.decode("utf-8").strip()
+                    temp = [i.rstrip() for i in line.split(":")]
+                    transaction = [s for s in temp[0].split(self.sep)]
+                    self.Database.append([x for x in transaction if x])
+                    # keep zero utilities so items and utilities stay aligned
+                    utilities = [int(s) for s in temp[2].split(self.sep) if s.strip()]
+                    self.utilityValues.append(utilities)
+            else:
+                try:
+                    with open(self.inputFile, 'r', encoding='utf-8') as f:
+                        for line in f:
+                            line = line.strip()
+                            temp = [i.rstrip() for i in line.split(":")]
+                            transaction = [s for s in temp[0].split(self.sep)]
+                            self.Database.append([x for x in transaction if x])
+                            utilities = [int(s) for s in temp[2].split(self.sep) if s.strip()]
+                            self.utilityValues.append(utilities)
+                except IOError:
+                    print("File Not Found")
+                    quit()
+ + +
+[docs] + def readDatabase(self) -> None: + """ + read database from input file and store into database and size of each transaction. + """ + numberOfTransaction = 0 + self.creatingItemSets() + for k in range(len(self.Database)): + numberOfTransaction += 1 + transaction = self.Database[k] + utilities = self.utilityValues[k] + self.database[numberOfTransaction] = transaction + for i in range(len(transaction)): + self.utility[transaction[i]] = self.utility.get(transaction[i],0) + self.utility[transaction[i]] += utilities[i] + self.lengthList = [len(s) for s in self.database.values()] + self.utility = {k: v for k, v in sorted(self.utility.items(), key=lambda x:x[1], reverse=True)}
+ + +
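A minimal sketch of the per-item utility accumulation performed by readDatabase() above, using hypothetical transactions and utilities (the items:totalUtility:utilities input layout is inferred from the parser in creatingItemSets):

transactions = [['a', 'b'], ['b']]   # hypothetical parsed transactions
utilityValues = [[5, 3], [4]]        # matching per-item utilities
utility = {}
for items, utils in zip(transactions, utilityValues):
    for item, u in zip(items, utils):
        utility[item] = utility.get(item, 0) + u
utility = dict(sorted(utility.items(), key=lambda x: x[1], reverse=True))
# utility -> {'b': 7, 'a': 5}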
+[docs] + def getDatabaseSize(self) -> int: + """ + get the size of database + :return: size of database + :rtype: int + """ + return len(self.database)
+ + +
+[docs] + def getTotalNumberOfItems(self) -> int: + """ + get the number of items in database. + :return: number of items + :rtype: int + """ + return len(self.getSortedListOfItemFrequencies())
+ + +
+[docs] + def getMinimumTransactionLength(self) -> int: + """ + get the minimum transaction length + :return: minimum transaction length + :rtype: int + """ + return min(self.lengthList)
+ + +
+[docs] + def getAverageTransactionLength(self) -> float: + """ + get the average transaction length. It is sum of all transaction length divided by database length. + :return: average transaction length + :rtype: float + """ + totalLength = sum(self.lengthList) + return totalLength / len(self.database)
+ + +
+[docs] + def getMaximumTransactionLength(self) -> int: + """ + get the maximum transaction length + :return: maximum transaction length + :rtype: int + """ + return max(self.lengthList)
+ + +
+[docs] + def getStandardDeviationTransactionLength(self) -> float: + """ + get the standard deviation transaction length + :return: standard deviation transaction length + :rtype: float + """ + return statistics.pstdev(self.lengthList)
+ + +
+[docs] + def getVarianceTransactionLength(self) -> float: + """ + get the variance transaction length + :return: variance transaction length + :rtype: float + """ + return statistics.variance(self.lengthList)
+ + +
+[docs] + def getNumberOfItems(self) -> int: + """ + get the number of items in database. + :return: number of items + :rtype: int + """ + return len(self.getSortedListOfItemFrequencies())
+ + +
+[docs]
+    def getSparsity(self) -> float:
+        """
+        Get the sparsity of the database, i.e. the fraction of zero entries in the item-transaction matrix.
+        :return: database sparsity
+        :rtype: float
+        """
+        matrixSize = self.getDatabaseSize() * len(self.getSortedListOfItemFrequencies())
+        return (matrixSize - sum(self.getSortedListOfItemFrequencies().values())) / matrixSize
+ + +
+[docs] + def getSortedListOfItemFrequencies(self) -> dict: + """ + get sorted list of item frequencies + :return: item frequencies + :rtype: dict + """ + itemFrequencies = {} + for tid in self.database: + for item in self.database[tid]: + itemFrequencies[item] = itemFrequencies.get(item, 0) + itemFrequencies[item] += 1 + return {k: v for k, v in sorted(itemFrequencies.items(), key=lambda x:x[1], reverse=True)}
+ + +
+[docs]
+    def getFrequenciesInRange(self) -> dict:
+        """
+        Bin the item frequencies into five ranges between 0 and the maximum frequency.
+        :return: mapping from the number of items in a bin to that bin's upper frequency bound
+        :rtype: dict
+        """
+        fre = self.getSortedListOfItemFrequencies()
+        rangeFrequencies = {}
+        maximum = max(fre.values())
+        values = [int(i * maximum / 6) for i in range(1, 6)]
+        # inclusive upper bounds so frequencies that fall exactly on a boundary are counted
+        va = len({key: val for key, val in fre.items() if 0 < val <= values[0]})
+        rangeFrequencies[va] = values[0]
+        for i in range(1, len(values)):
+            va = len({key: val for key, val in fre.items() if values[i - 1] < val <= values[i]})
+            rangeFrequencies[va] = values[i]
+        return rangeFrequencies
+ + +
+[docs] + def getTransanctionalLengthDistribution(self) -> dict: + """ + get transaction length + :return: a dictionary of Transaction Length Distribution + :rtype: dict + """ + transactionLength = {} + for length in self.lengthList: + transactionLength[length] = transactionLength.get(length, 0) + transactionLength[length] += 1 + return {k: v for k, v in sorted(transactionLength.items(), key=lambda x:x[0])}
+ + +
+[docs] + def save(self, data, outputFile) -> None: + """ + store data into outputFile + :param data: input data + :type data: dict + :param outputFile: output file name or path to store + :type outputFile: str + :return: None + """ + with open(outputFile, 'w') as f: + for key, value in data.items(): + f.write(f'{key}\t{value}\n')
+ + +
+[docs] + def getTotalUtility(self) -> int: + """ + get sum of utility + :return: total utility + :rtype: int + """ + return sum(list(self.utility.values()))
+ + +
+[docs] + def getMinimumUtility(self) -> int: + """ + get the minimum utility + :return: integer value of minimum utility + :rtype: int + """ + return min(list(self.utility.values()))
+ + +
+[docs] + def getAverageUtility(self) -> float: + """ + get the average utility + :return: average utility + :rtype: float + """ + return sum(list(self.utility.values())) / len(self.utility)
+ + +
+[docs] + def getMaximumUtility(self) -> int: + """ + get the maximum utility + :return: integer value of maximum utility + :rtype: int + """ + return max(list(self.utility.values()))
+ + +
+[docs] + def getSortedUtilityValuesOfItem(self) -> dict: + """ + get sorted utility value each item. key is item and value is utility of item + :return: sorted dictionary utility value of item + :rtype: dict + """ + return self.utility
+ + +
+[docs] + def printStats(self) -> None: + + """ + This function is used to print the results + """ + print(f'Database size : {self.getDatabaseSize()}') + print(f'Number of items : {self.getTotalNumberOfItems()}') + print(f'Minimum Transaction Size : {self.getMinimumTransactionLength()}') + print(f'Average Transaction Size : {self.getAverageTransactionLength()}') + print(f'Maximum Transaction Size : {self.getMaximumTransactionLength()}') + print(f'Minimum utility : {self.getMinimumUtility()}') + print(f'Average utility : {self.getAverageUtility()}') + print(f'Maximum utility : {self.getMaximumUtility()}') + print(f'Standard Deviation Transaction Size : {self.getStandardDeviationTransactionLength()}') + print(f'Variance : {self.getVarianceTransactionLength()}') + print(f'Sparsity : {self.getSparsity()}')
+ + + +
+[docs] + def plotGraphs(self) -> None: + + itemFrequencies = self.getFrequenciesInRange() + transactionLength = self.getTransanctionalLengthDistribution() + plt.plotLineGraphFromDictionary(itemFrequencies, 100, 0, 'Frequency', 'no of items', 'frequency') + plt.plotLineGraphFromDictionary(transactionLength, 100, 0, 'transaction length', 'transaction length', 'frequency')
+
+
+
+
+if __name__ == '__main__':
+    import PAMI.extras.graph.plotLineGraphFromDictionary as plt
+
+    try:
+        if len(sys.argv) != 3:
+            raise ValueError("Missing some of the input parameters. Format: python UtilityDatabase.py <fileName> <separator>")
+
+        iFile, separator = sys.argv[1], sys.argv[2]
+        obj = UtilityDatabase(iFile, separator)
+        obj.run()
+        if obj.getDatabaseSize() > 0:
+            obj.printStats()
+            obj.plotGraphs()
+        else:
+            print("No data found in the database.")
+
+    except ValueError as ve:
+        print(f"ValueError: {ve}")
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/fuzzyTransformation/temporalToFuzzy.html b/sphinx/_build/html/_modules/PAMI/extras/fuzzyTransformation/temporalToFuzzy.html new file mode 100644 index 000000000..c0cf0fcdc --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/fuzzyTransformation/temporalToFuzzy.html @@ -0,0 +1,319 @@ + + + + + + PAMI.extras.fuzzyTransformation.temporalToFuzzy — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.fuzzyTransformation.temporalToFuzzy

+# temporalToFuzzy is used to convert the transactional database into Fuzzy transactional database.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#             from PAMI.extras.FuzzyTransformation import temporalToFuzzy as db
+#
+#             obj = db.temporalToFuzzy(iFile, FuzFile, oFile, "\t" )
+#
+#             obj.startConvert()
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+from PAMI.extras.fuzzyTransformation import abstract as _ab
+
+
+
+[docs] +class temporalToFuzzy(_ab._convert): + """ + + :Description: + temporalToFuzzy is used to convert the temporal database into Fuzzy temporal database. + + + :param iFile: str : + Name of the Input file to mine complete set of frequent patterns + :param oFile: str : + Name of the output file to store complete set of frequent patterns + :param fuzFile: str : + Name of the Fuzzy File to process set of data. + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + **Importing this algorithm into a python program** + -------------------------------------------------------- + .. code-block:: python + + from PAMI.extras.FuzzyTransformation import temporalToFuzzy as db + + obj = db.temporalToFuzzy(iFile, FuzFile, oFile, "\t" ) + + obj.startConvert() + + """ + + _iFile: str = ' ' + _fuzFile: str = ' ' + _oFile: str = ' ' + + + def __init__(self, iFile: str, fuzFile: str, oFile: str, sep: str='\t'): + self._iFile = iFile + self._fuzFile = fuzFile + self._oFile = oFile + self._sep = sep + self._RegionsCal = [] + self._RegionsLabel = [] + self._LabelKey = {} + self._LabelKeyOne = {} + self._dbLen = 0 + self._list = [] + self._transactionsDB = [] + self._fuzzyValuesDB = [] + self._tsDB = [] + self._fuzzyRegionReferenceMap = {} + + def _creatingItemSets(self) -> None: + """ + To process the input file and store the timestamps, items, and their values as lists respectively. + """ + self._transactionsDB, self._fuzzyValuesDB, self._tsDB = [], [], [] + if isinstance(self._iFile, _ab._pd.DataFrame): + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'Transactions' in i: + self._transactionsDB = self._iFile['Transactions'].tolist() + if 'fuzzyValues' in i: + self._fuzzyValuesDB = self._iFile['Utilities'].tolist() + + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line = line.decode("utf-8") + line = line.split("\n")[0] + parts = line.split(":") + items = parts[0].split(self._sep) + quantities = parts[1].split(self._sep) + self._tsDB.append(int(items[0])) + self._transactionsDB.append([x for x in items[1:]]) + self._fuzzyValuesDB.append([x for x in quantities]) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line = line.strip() + parts = line.split(":") + parts[0] = parts[0].strip() + parts[1] = parts[1].strip() + items = parts[0].split(self._sep) + quantities = parts[1].split(self._sep) + self._tsDB.append(int(items[0])) + self._transactionsDB.append([x for x in items[1:]]) + self._fuzzyValuesDB.append([x for x in quantities]) + except IOError: + print("File Not Found") + quit() + + def _fuzzyMembershipFunc(self) -> None: + """ + The Fuzzy file is processed and labels created according the boundaries specified in input file. 
+ """ + try: + with open(self._fuzFile, 'r', encoding='utf-8') as f: + count = 0 + for line in f: + line = line.split("\n")[0] + parts = line.split(self._sep) + lowerBound = parts[0].strip() + upperBound = parts[1].strip() + lb_Label = parts[2].strip() + ub_Label = parts[3].strip() + self._RegionsCal.append([int(lowerBound), int(upperBound)]) + self._RegionsLabel.append([lb_Label, ub_Label]) + for i in range(0, 2): + if ub_Label not in self._LabelKey: + self._LabelKey[ub_Label] = count + count += 1 + self._LabelKeyOne = {v: k for k, v in self._LabelKey.items()} + except IOError: + print("File Not Found") + quit() + + def _Regions(self, quantity: int) -> None: + """ + calculate the labelled region of input "quantity" + :param quantity: represents the quantity of item + :type quantity: int + :return: None + """ + self._list = [0] * len(self._LabelKey) + if self._RegionsCal[0][0] < quantity <= self._RegionsCal[0][1]: + self._list[0] = 1 + return + elif quantity >= self._RegionsCal[-1][0]: + self._list[-1] = 1 + return + else: + for i in range(1, len(self._RegionsCal) - 1): + if self._RegionsCal[i][0] <= quantity <= self._RegionsCal[i][1]: + base = self._RegionsCal[i][1] - self._RegionsCal[i][0] + self._list[self._LabelKey[self._RegionsLabel[i][0]]] = float((self._RegionsCal[i][1] - quantity) / base) + self._list[self._LabelKey[self._RegionsLabel[i][1]]] = float((quantity - self._RegionsCal[i][0]) / base) + return + +
+[docs]
+    def startConvert(self) -> None:
+        """
+        Main method to convert the temporal database into a fuzzy database.
+        """
+        _writer = open(self._oFile, 'w+')
+        self._creatingItemSets()
+        self._fuzzyMembershipFunc()
+        for line in range(len(self._transactionsDB)):
+            item_list = self._transactionsDB[line]
+            fuzzyValues_list = self._fuzzyValuesDB[line]
+            self._dbLen += 1
+            s = str(self._tsDB[line])
+            ss = str()
+            for i in range(0, len(item_list)):
+                item = item_list[i]
+                fuzzy_ref = fuzzyValues_list[i]
+                self._Regions(int(fuzzy_ref))
+                # _Regions fills self._list in place; keep a copy of the memberships
+                self._fuzzyRegionReferenceMap[fuzzy_ref] = list(self._list)
+                s1 = [j for j, v in enumerate(self._list) if v != 0]
+                for k in s1:
+                    s = s + item + '.' + self._LabelKeyOne[k] + '\t'
+                    st = round(self._list[k], 2)
+                    ss = ss + str(st) + '\t'
+            s2 = s.strip('\t') + ":" + ss
+            _writer.write("%s \n" % s2)
+        _writer.close()
+
+
+
+
+
+if __name__ == "__main__":
+    if len(_ab._sys.argv) == 4 or len(_ab._sys.argv) == 5:
+        if len(_ab._sys.argv) == 5:
+            _ap = temporalToFuzzy(_ab._sys.argv[1], _ab._sys.argv[2], _ab._sys.argv[3], _ab._sys.argv[4])
+        if len(_ab._sys.argv) == 4:
+            _ap = temporalToFuzzy(_ab._sys.argv[1], _ab._sys.argv[2], _ab._sys.argv[3])
+        _ap.startConvert()
+    else:
+        print("Error! The number of input parameters does not match the total number of parameters provided")
+
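+
+# An end-to-end sketch (illustrative; file names and values are hypothetical).
+# Given the parsing above, each input line has the form
+# "timestamp<sep>item1<sep>item2:value1<sep>value2", and each line of the
+# fuzzy file has the form "lowerBound<sep>upperBound<sep>lowerLabel<sep>upperLabel"
+# (fields are tab-separated by default):
+#
+#     # input.txt (temporal database)
+#     1	a	b:12	5
+#     2	b	c:3	18
+#
+#     # fuzzy.txt (membership regions)
+#     0	5	Low	Medium
+#     5	10	Medium	High
+#
+#     from PAMI.extras.fuzzyTransformation import temporalToFuzzy as db
+#     obj = db.temporalToFuzzy('input.txt', 'fuzzy.txt', 'output.txt', '\t')
+#     obj.startConvert()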
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/fuzzyTransformation/transactionalToFuzzy.html b/sphinx/_build/html/_modules/PAMI/extras/fuzzyTransformation/transactionalToFuzzy.html new file mode 100644 index 000000000..c839c0a61 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/fuzzyTransformation/transactionalToFuzzy.html @@ -0,0 +1,324 @@ + + + + + + PAMI.extras.fuzzyTransformation.transactionalToFuzzy — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.fuzzyTransformation.transactionalToFuzzy

+# transactionalToFuzzy is used to convert a transactional database into a fuzzy transactional database.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#             from PAMI.extras.fuzzyTransformation import transactionalToFuzzy as db
+#
+#             obj = db.transactionalToFuzzy(iFile, fuzFile, oFile, "\t")
+#
+#             obj.startConvert()
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+from PAMI.extras.fuzzyTransformation import abstract as _ab
+
+
+
+[docs]
+class transactionalToFuzzy(_ab._convert):
+    """
+    :Description:
+        transactionalToFuzzy is used to convert a transactional database into a fuzzy transactional database.
+
+    :param iFile: str :
+        Name of the Input file to mine complete set of frequent patterns
+    :param oFile: str :
+        Name of the output file to store complete set of frequent patterns
+    :param fuzFile: str :
+        Name of the fuzzy file to process the set of data.
+    :param sep: str :
+        This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, users can override the default separator.
+
+    **Importing this algorithm into a python program**
+    --------------------------------------------------------
+    .. code-block:: python
+
+        from PAMI.extras.fuzzyTransformation import transactionalToFuzzy as db
+
+        obj = db.transactionalToFuzzy(iFile, fuzFile, oFile, "\t")
+
+        obj.startConvert()
+    """
+
+    _iFile: str = ' '
+    _fuzFile: str = ' '
+    _oFile: str = ' '
+
+    def __init__(self, iFile: str, fuzFile: str, oFile: str, sep: str='\t'):
+        self._iFile = iFile
+        self._fuzFile = fuzFile
+        self._oFile = oFile
+        self._sep = sep
+        self._RegionsCal = []
+        self._RegionsLabel = []
+        self._LabelKey = {}
+        self._LabelKeyOne = {}
+        self._dbLen = 0
+        self._list = []
+        self._transactionsDB = []
+        self._fuzzyValuesDB = []
+        self._tsDB = []
+        self._fuzzyRegionReferenceMap = {}
+
+    def _creatingItemSets(self) -> None:
+        """
+        To process the input file and store the items and their values as lists respectively.
+        """
+        self._transactionsDB, self._fuzzyValuesDB, self._tsDB = [], [], []
+        if isinstance(self._iFile, _ab._pd.DataFrame):
+            if self._iFile.empty:
+                print("The input DataFrame is empty")
+            i = self._iFile.columns.values.tolist()
+            if 'Transactions' in i:
+                self._transactionsDB = self._iFile['Transactions'].tolist()
+            if 'fuzzyValues' in i:
+                self._fuzzyValuesDB = self._iFile['fuzzyValues'].tolist()
+
+        if isinstance(self._iFile, str):
+            if _ab._validators.url(self._iFile):
+                data = _ab._urlopen(self._iFile)
+                for line in data:
+                    line = line.decode("utf-8")
+                    line = line.split("\n")[0]
+                    parts = line.split(":")
+                    items = parts[0].split(self._sep)
+                    quantities = parts[1].split(self._sep)
+                    # a transactional database carries no timestamp column
+                    self._transactionsDB.append([x for x in items])
+                    self._fuzzyValuesDB.append([x for x in quantities])
+            else:
+                try:
+                    with open(self._iFile, 'r', encoding='utf-8') as f:
+                        for line in f:
+                            line = line.strip()
+                            parts = line.split(":")
+                            parts[0] = parts[0].strip()
+                            parts[1] = parts[1].strip()
+                            items = parts[0].split(self._sep)
+                            quantities = parts[1].split(self._sep)
+                            self._transactionsDB.append([x for x in items])
+                            self._fuzzyValuesDB.append([x for x in quantities])
+                except IOError:
+                    print("File Not Found")
+                    quit()
+
+    def _fuzzyMembershipFunc(self) -> None:
+        """
+        The fuzzy file is processed and labels are created according to the boundaries specified in the input file.
+        """
+        try:
+            with open(self._fuzFile, 'r', encoding='utf-8') as f:
+                count = 0
+                for line in f:
+                    line = line.split("\n")[0]
+                    parts = line.split(self._sep)
+                    lowerBound = parts[0].strip()
+                    upperBound = parts[1].strip()
+                    lb_Label = parts[2].strip()
+                    ub_Label = parts[3].strip()
+                    self._RegionsCal.append([int(lowerBound), int(upperBound)])
+                    self._RegionsLabel.append([lb_Label, ub_Label])
+                    if ub_Label not in self._LabelKey:
+                        self._LabelKey[ub_Label] = count
+                        count += 1
+                self._LabelKeyOne = {v: k for k, v in self._LabelKey.items()}
+        except IOError:
+            print("File Not Found")
+            quit()
+
+    def _Regions(self, quantity: int) -> None:
+        """
+        Calculate the labelled region of the input "quantity"
+        :param quantity: represents the quantity of an item
+        :type quantity: int
+        :return: None
+        """
+        self._list = [0] * len(self._LabelKey)
+        if self._RegionsCal[0][0] < quantity <= self._RegionsCal[0][1]:
+            self._list[0] = 1
+            return
+        elif quantity >= self._RegionsCal[-1][0]:
+            self._list[-1] = 1
+            return
+        else:
+            for i in range(1, len(self._RegionsCal) - 1):
+                if self._RegionsCal[i][0] <= quantity <= self._RegionsCal[i][1]:
+                    base = self._RegionsCal[i][1] - self._RegionsCal[i][0]
+                    self._list[self._LabelKey[self._RegionsLabel[i][0]]] = float((self._RegionsCal[i][1] - quantity) / base)
+                    self._list[self._LabelKey[self._RegionsLabel[i][1]]] = float((quantity - self._RegionsCal[i][0]) / base)
+                    return
+
+[docs]
+    def startConvert(self) -> None:
+        """
+        Main method to convert the transactional database into a fuzzy database.
+        """
+        _writer = open(self._oFile, 'w+')
+        self._creatingItemSets()
+        self._fuzzyMembershipFunc()
+        for line in range(len(self._transactionsDB)):
+            item_list = self._transactionsDB[line]
+            fuzzyValues_list = self._fuzzyValuesDB[line]
+            self._dbLen += 1
+            # transactional databases carry no timestamp, so the output line
+            # starts with the first fuzzified item
+            s = str()
+            ss = str()
+            for i in range(0, len(item_list)):
+                item = item_list[i]
+                fuzzy_ref = fuzzyValues_list[i]
+                self._Regions(int(fuzzy_ref))
+                # _Regions fills self._list in place; keep a copy of the memberships
+                self._fuzzyRegionReferenceMap[fuzzy_ref] = list(self._list)
+                s1 = [j for j, v in enumerate(self._list) if v != 0]
+                for k in s1:
+                    s = s + item + '.' + self._LabelKeyOne[k] + '\t'
+                    st = round(self._list[k], 2)
+                    ss = ss + str(st) + '\t'
+            s2 = s.strip('\t') + ":" + ss
+            _writer.write("%s \n" % s2)
+        _writer.close()
+
+
+
+
+
+if __name__ == "__main__":
+    if len(_ab._sys.argv) == 4 or len(_ab._sys.argv) == 5:
+        if len(_ab._sys.argv) == 5:
+            _ap = transactionalToFuzzy(_ab._sys.argv[1], _ab._sys.argv[2], _ab._sys.argv[3], _ab._sys.argv[4])
+        if len(_ab._sys.argv) == 4:
+            _ap = transactionalToFuzzy(_ab._sys.argv[1], _ab._sys.argv[2], _ab._sys.argv[3])
+        _ap.startConvert()
+    else:
+        print("Error! The number of input parameters does not match the total number of parameters provided")
+
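+
+# A command-line sketch derived from the __main__ block above (paths are
+# hypothetical):
+#
+#     python transactionalToFuzzy.py input.txt fuzzy.txt output.txt
+#
+# where each input line has the form "item1<sep>item2:value1<sep>value2" and
+# the fuzzy file uses the four-column region format
+# (lowerBound, upperBound, lowerLabel, upperLabel).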
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/generateDatabase/generateSpatioTemporalDatabase.html b/sphinx/_build/html/_modules/PAMI/extras/generateDatabase/generateSpatioTemporalDatabase.html new file mode 100644 index 000000000..065726448 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/generateDatabase/generateSpatioTemporalDatabase.html @@ -0,0 +1,269 @@ + + + + + + PAMI.extras.generateDatabase.generateSpatioTemporalDatabase — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.generateDatabase.generateSpatioTemporalDatabase

+# generateSpatioTemporalDatabase is used to generate a synthetic spatio-temporal database.
+#
+#   **Importing this algorithm into a python program**
+#   --------------------------------------------------------
+#
+#             from PAMI.extras.generateDatabase import generateSpatioTemporalDatabase as db
+#
+#             obj = db.spatioTemporalDatabaseGenerator(0, 100, 0, 100, 10, 10, 0.5, 0.9, 0.5, 0.9)
+#
+#             obj.createPoint(0, 100, 0, 100) # values can be set according to the size of the data
+#
+#             obj.saveAsFile("outputFileName") # To create a file
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+import random as rand
+from typing import List, Dict, Tuple, Set, Union, Any, Generator
+import pandas
+import sys
+
+
+[docs]
+class spatioTemporalDatabaseGenerator():
+    """
+    :Description: spatioTemporalDatabaseGenerator generates a synthetic spatio-temporal database.
+
+    :param xmin: int :
+        To give minimum value for x
+    :param xmax: int :
+        To give maximum value for x
+    :param ymin: int :
+        To give minimum value for y
+    :param ymax: int :
+        To give maximum value for y
+    :param maxTimeStamp: int :
+        maximum time stamp for the database
+    :param numberOfItems: int :
+        number of items in the database
+    :param itemChanceLow: int or float :
+        lowest chance for an item in the database
+    :param itemChanceHigh: int or float :
+        highest chance for an item in the database
+    :param timeStampChanceLow: int or float :
+        lowest time stamp value
+    :param timeStampChanceHigh: int or float :
+        highest time stamp value
+
+    **Importing this algorithm into a python program**
+    --------------------------------------------------------
+    .. code-block:: python
+
+        from PAMI.extras.generateDatabase import generateSpatioTemporalDatabase as db
+
+        obj = db.spatioTemporalDatabaseGenerator(0, 100, 0, 100, 10, 10, 0.5, 0.9, 0.5, 0.9)
+
+        obj.createPoint(0, 100, 0, 100) # values can be set according to the size of the data
+
+        obj.saveAsFile("outputFileName") # To create a file
+
+    """
+
+    coinFlip = [True, False]
+    timestamp = list()
+    items = list()
+    alreadyAdded = set()
+    outFileName = ""
+
+[docs] + def createPoint(self, xmin: int, xmax: int, ymin: int, ymax: int) -> Tuple[int, int]: + x = rand.randint(xmin, xmax) + y = rand.randint(ymin, ymax) + coordinate = tuple([x, y]) + return coordinate
+
+    def __init__(self, xmin: int, xmax: int, ymin: int, ymax: int, maxTimeStamp: int, numberOfItems: int,
+                 itemChanceLow: float, itemChanceHigh: float, timeStampChanceLow: float,
+                 timeStampChanceHigh: float) -> None:
+        coinFlip = [True, False]
+        timeStamp = 1
+        self.timeStampList = list()
+        self.itemList = list()
+
+        while timeStamp != maxTimeStamp + 1:
+            itemSet = list()
+            for i in range(1, numberOfItems + 1):
+                if rand.choices(coinFlip, weights=[itemChanceLow, itemChanceHigh], k=1)[0]:
+                    coordinate = self.createPoint(xmin, xmax, ymin, ymax)
+                    if coordinate not in self.alreadyAdded:
+                        itemSet.append(list(coordinate))
+                        self.alreadyAdded.add(coordinate)
+            if itemSet != []:
+                self.timeStampList.append(timeStamp)
+                self.itemList.append(itemSet)
+            if rand.choices(coinFlip, weights=[itemChanceLow, itemChanceHigh], k=1)[0]:
+                timeStamp += 1
+        self.outFileName = "temporal_" + str(maxTimeStamp // 1000) + \
+            "KI" + str(numberOfItems) + "C" + str(itemChanceLow) + "T" + str(timeStampChanceLow) + ".csv"
+
+[docs]
+    def saveAsFile(self, outFileName="", sep="\t") -> None:
+        if outFileName != "":
+            self.outFileName = outFileName
+
+        file = open(self.outFileName, "w")
+
+        for i in range(len(self.timeStampList)):
+            file.write(str(self.timeStampList[i]))
+            for j in range(len(self.itemList[i])):
+                file.write(sep + str(self.itemList[i][j]))
+            file.write('\n')
+
+        file.close()
+
+
+
+
+if __name__ == "__main__":
+    xmin = 0
+    xmax = 100
+    ymin = 0
+    ymax = 100
+    maxTimeStamp = 10
+    numberOfItems = 10
+    itemChanceLow = 0.5
+    itemChanceHigh = 0.9
+    timeStampChanceLow = 0.5
+    timeStampChanceHigh = 0.9
+    obj = spatioTemporalDatabaseGenerator(xmin, xmax, ymin, ymax, maxTimeStamp, numberOfItems,
+                                          itemChanceLow, itemChanceHigh, timeStampChanceLow, timeStampChanceHigh)
+    obj.saveAsFile(sys.argv[1] if len(sys.argv) > 1 else "")
+
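+
+# An illustrative sketch of the output produced by saveAsFile(): one line per
+# surviving timestamp, followed by the generated [x, y] points, tab-separated
+# (values are random, so the lines below are only an example):
+#
+#     1	[37, 94]	[5, 12]
+#     2	[61, 3]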
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/generateDatabase/generateTemporalDatabase.html b/sphinx/_build/html/_modules/PAMI/extras/generateDatabase/generateTemporalDatabase.html new file mode 100644 index 000000000..2f785c82a --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/generateDatabase/generateTemporalDatabase.html @@ -0,0 +1,399 @@ + + + + + + PAMI.extras.generateDatabase.generateTemporalDatabase — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.generateDatabase.generateTemporalDatabase

+# generateTemporalDatabase is used to generate a synthetic temporal database.
+#
+#  **Importing this algorithm into a python program**
+#  --------------------------------------------------------
+#
+#             from PAMI.extras.generateDatabase import generateTemporalDatabase as db
+#
+#             obj = db.generateTemporalDatabase(100, 10, 6, oFile, 50, "\t")
+#
+#             obj.createTemporalFile() # to create the database file or dataframe
+#
+#             obj.getFileName() # to get the output file name
+#
+#             obj.getDatabaseAsDataFrame() # to get the database as a pandas dataframe
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+from typing import Tuple, List, Union
+import pandas as pd
+import numpy as np
+import random
+import sys
+import os
+
+
+[docs]
+class generateTemporalDatabase:
+    """
+    :Description: generateTemporalDatabase creates a temporal database and outputs a database file or a dataframe, depending on the input
+
+    :param numOfTransactions: int
+        number of transactions
+    :param avgLenOfTransactions: int
+        average length of transactions
+    :param numItems: int
+        number of items
+    :param outputFile: str
+        output file name
+    :param percentage: int
+        percentage of coin tosses that advance the TID of the temporal database
+    :param sep: str
+        separator for the database output file
+    :param typeOfFile: str
+        specify "database" or "dataframe" to get the corresponding output
+
+    :Methods:
+        getFileName():
+            returns the output file name
+        getDatabaseAsDataFrame():
+            returns the dataframe
+        performCoinFlip():
+            performs a coin flip with the given probability
+        tuning():
+            tunes the transaction lengths to match avgLenOfTransactions
+        createTemporalFile():
+            creates the temporal database file or dataframe, depending on the input
+
+    **Importing this algorithm into a python program**
+    --------------------------------------------------------
+    .. code-block:: python
+
+        from PAMI.extras.generateDatabase import generateTemporalDatabase as db
+
+        numOfTransactions = 100
+        numItems = 15
+        avgTransactionLength = 6
+        outFileName = 'temporal_ot.txt'
+        sep = '\t'
+        percent = 75
+        frameOrBase = "dataframe" # if you want to get a dataframe as output
+        frameOrBase = "database" # if you want to get a database/csv/file as output
+
+        temporalDB = db.generateTemporalDatabase(numOfTransactions, avgTransactionLength, numItems, outFileName, percent, sep, frameOrBase)
+        temporalDB.createTemporalFile()
+        print(temporalDB.getDatabaseAsDataFrame())
+
+    """
+    def __init__(self, numOfTransactions: int, avgLenOfTransactions: int,
+                 numItems: int, outputFile: str, percentage: int=50,
+                 sep: str='\t', typeOfFile: str="Database") -> None:
+        """
+        Initialize the generateTemporalDatabase class; the parameters are documented in the class docstring above.
+        """
+        self.numOfTransactions = numOfTransactions
+        self.avgLenOfTransactions = avgLenOfTransactions
+        self.numItems = numItems
+        self.outputFile = outputFile
+        if percentage > 1:
+            self.percentage = percentage / 100
+        else:
+            self.percentage = percentage
+        self.sep = sep
+        self.typeOfFile = typeOfFile.lower()
+
+[docs] + def getFileName(self) -> str: + """ + return filename + :return: filename + :rtype: str + """ + return self.outputFile
+ + +
+[docs] + def getDatabaseAsDataFrame(self) -> pd.DataFrame: + """ + return dataframe + :return: dataframe + :rtype: pd.DataFrame + """ + return self.df
+ + +
+[docs] + def performCoinFlip(self, probability: float) -> bool: + """ + Perform a coin flip with the given probability. + :param probability: probability to perform coin flip + :type probability: float + :return: True if coin flip is performed, False otherwise + :rtype: bool + """ + result = np.random.choice([0, 1], p=[1 - probability, probability]) + return result == 1
+ + + +
+[docs]
+    def tuning(self, array, sumRes) -> list:
+        """
+        Tune the array so that the sum of the values is equal to sumRes
+
+        :param array: list of [index, length] pairs
+        :type array: list
+        :param sumRes: target sum
+        :type sumRes: int
+        :return: list of values with the sum equal to sumRes after tuning
+        :rtype: list
+        """
+
+        # first generate a random array of line lengths to be tuned
+        values = np.random.randint(1, self.numItems, len(array))
+
+        while np.sum(values) != sumRes:
+            # if the sum is too large, decrease the largest value
+            if np.sum(values) > sumRes:
+                maxIndex = np.argmax(values)
+                values[maxIndex] -= 1
+            # if the sum is too small, increase the smallest value
+            else:
+                minIndex = np.argmin(values)
+                values[minIndex] += 1
+
+        # write the tuned lengths back into the [index, length] pairs
+        for i in range(len(array)):
+            array[i][1] = values[i]
+
+        return array
+ + +
+[docs] + def createTemporalFile(self) -> None: + """ + create Temporal database or dataframe depending on input + :return: None + """ + + db = [] + lineSize = [] + for i in range(self.numOfTransactions): + db.append([i]) + if self.performCoinFlip(self.percentage): + lineSize.append([i,0]) + + # make it so that sum of lineSize[1] equal to numTransactions * avgLenOfTransactions + sumRes = self.numOfTransactions * self.avgLenOfTransactions + self.tuning(lineSize, sumRes) + + for i in range(len(lineSize)): + if lineSize[i][1] > self.numItems: + raise ValueError("Error: Either increase numItems or decrease avgLenOfTransactions or modify percentage") + line = np.random.choice(range(1, self.numItems + 1), lineSize[i][1], replace=False) + db[lineSize[i][0]].extend(line) + + if self.typeOfFile == "database": + with open(self.outputFile, "w") as outFile: + for line in db: + outFile.write(self.sep.join(map(str, line)) + '\n') + outFile.close() + + if self.typeOfFile == "dataframe": + data = { + 'timestamp': [line[0] for line in db], + 'transactions': pd.Series([line[1:] for line in db]) + } + self.df = pd.DataFrame(data) + + print("Temporal database created successfully")
+
+
+
+
+if __name__ == '__main__':
+    numOfTransactions = 100
+    numItems = 20
+    avgTransactionLength = 6
+    outFileName = 'temporal_out.txt'
+    sep = '\t'
+    frameOrBase = "database"
+
+    temporalDB = generateTemporalDatabase(numOfTransactions, avgTransactionLength, numItems, outFileName)
+
+    temporalDB.createTemporalFile()
+
+    numOfTransactions = 100
+    numItems = 15
+    avgTransactionLength = 6
+    outFileName = 'temporal_ot.txt'
+    sep = '\t'
+    percent = 75
+    frameOrBase = "dataframe"
+
+    temporalDB = generateTemporalDatabase(numOfTransactions, avgTransactionLength, numItems, outFileName, percent, sep, frameOrBase)
+    temporalDB.createTemporalFile()
+    print(temporalDB.getDatabaseAsDataFrame())
+
+    if len(sys.argv) >= 5:
+        obj = generateTemporalDatabase(int(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3]), sys.argv[4])
+        obj.createTemporalFile()
+
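+
+# A quick sanity-check sketch (illustrative): the line lengths are tuned so
+# that they sum to numOfTransactions * avgLenOfTransactions, so the mean
+# transaction length in the dataframe output equals avgLenOfTransactions:
+#
+#     temporalDB = generateTemporalDatabase(100, 6, 15, 'temporal_ot.txt', 75, '\t', 'dataframe')
+#     temporalDB.createTemporalFile()
+#     df = temporalDB.getDatabaseAsDataFrame()
+#     print(df['transactions'].apply(len).mean())  # 6.0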
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/generateDatabase/generateTransactionalDatabase.html b/sphinx/_build/html/_modules/PAMI/extras/generateDatabase/generateTransactionalDatabase.html new file mode 100644 index 000000000..0b83e0a31 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/generateDatabase/generateTransactionalDatabase.html @@ -0,0 +1,332 @@ + + + + + + PAMI.extras.generateDatabase.generateTransactionalDatabase — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.generateDatabase.generateTransactionalDatabase

+# generateTransactionalDatabase is used to generate a synthetic transactional database.
+#
+#  **Importing this algorithm into a python program**
+#  --------------------------------------------------------
+#     from PAMI.extras.generateDatabase import generateTransactionalDatabase as db
+#     obj = db.generateTransactionalDatabase(10, 5, 10)
+#     obj.create()
+#     obj.save('db.txt')
+#     print(obj.getTransactions()) to get the transactional database as a pandas dataframe
+
+# **Running the code from the command line**
+# --------------------------------------------------------
+#     python generateTransactionalDatabase.py 10 5 10 db.txt
+#     cat db.txt
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+
+import numpy as np
+import pandas as pd
+import sys
+
+
+
+[docs]
+class generateTransactionalDatabase:
+    """
+    :Description: Generate a transactional database with the given number of lines, average number of items per line, and total number of items
+
+    :Attributes:
+        numLines: int
+            number of lines
+        avgItemsPerLine: int
+            average number of items per line
+        numItems: int
+            total number of items
+
+    :Methods:
+        create:
+            Generate the transactional database
+        save:
+            Save the transactional database to a file
+        getTransactions:
+            Get the transactional database
+
+    """
+
+    def __init__(self, numLines, avgItemsPerLine, numItems) -> None:
+        """
+        Initialize the transactional database with the given parameters
+
+        Parameters:
+        numLines: int - number of lines
+        avgItemsPerLine: int - average number of items per line
+        numItems: int - total number of items
+        """
+
+        self.numLines = numLines
+        self.avgItemsPerLine = avgItemsPerLine
+        self.numItems = numItems
+        self.db = []
+
+[docs]
+    def tuning(self, array, sumRes) -> list:
+        """
+        Tune the array so that the sum of the values is equal to sumRes
+
+        :param array: list of values
+        :type array: list
+        :param sumRes: the target sum for the array
+        :type sumRes: int
+        :return: list of values whose sum is equal to sumRes
+        :rtype: list
+        """
+
+        while np.sum(array) != sumRes:
+            # pick a random index to adjust
+            randIndex = np.random.randint(0, len(array))
+            # if the sum is too large, decrease the chosen value
+            if np.sum(array) > sumRes:
+                array[randIndex] -= 1
+            # if the sum is too small, increase the chosen value
+            else:
+                array[randIndex] += 1
+        return array
+ + + +
+[docs]
+    def generateArray(self, nums, avg, maxItems) -> list:
+        """
+        Generate a random array of length nums whose values average to avg
+
+        :param nums: number of values
+        :type nums: int
+        :param avg: average value
+        :type avg: float
+        :param maxItems: maximum value
+        :type maxItems: int
+        :return: random array
+        :rtype: list
+        """
+
+        # generate nums random values
+        values = np.random.randint(1, maxItems, nums)
+
+        sumRes = nums * avg
+
+        self.tuning(values, sumRes)
+
+        # if any value is less than 1, increase it and tune the array again
+        while np.any(values < 1):
+            for i in range(nums):
+                if values[i] < 1:
+                    values[i] += 1
+            self.tuning(values, sumRes)
+
+        # if any value is greater than maxItems, decrease it and tune again
+        while np.any(values > maxItems):
+            for i in range(nums):
+                if values[i] > maxItems:
+                    values[i] -= 1
+            self.tuning(values, sumRes)
+
+        # if all values are the same, randomly bump one value and re-tune
+        while np.all(values == values[0]):
+            values[np.random.randint(0, nums)] += 1
+            self.tuning(values, sumRes)
+
+        return values
+ + +
+[docs]
+    def create(self) -> None:
+        """
+        Generate the transactional database
+        :return: None
+        """
+        values = self.generateArray(self.numLines, self.avgItemsPerLine, self.numItems)
+
+        for value in values:
+            line = np.random.choice(range(1, self.numItems + 1), value, replace=False)
+            self.db.append(line)
+ + +
+[docs] + def save(self, filename) -> None: + """ + Save the transactional database to a file + + :param filename: name of the file + + :type filename: str + + :return: None + """ + + with open(filename, 'w') as f: + for line in self.db: + f.write(','.join(map(str, line)) + '\n')
+ + +
+[docs] + def getTransactions(self) -> pd.DataFrame: + """ + Get the transactional database + + :return: the transactional database + + :rtype: pd.DataFrame + """ + df = pd.DataFrame(self.db) + return df
+
+
+
+
+if __name__ == "__main__":
+    # test the class
+    db = generateTransactionalDatabase(10, 5, 10)
+    db.create()
+    db.save('db.txt')
+    print(db.getTransactions())
+
+    if len(sys.argv) >= 5:
+        obj = generateTransactionalDatabase(int(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3]))
+        obj.create()
+        obj.save(sys.argv[4])
+        # print(obj.getTransactions())
+
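+
+# A quick sanity-check sketch (illustrative): create() tunes the line lengths
+# so that they average to avgItemsPerLine:
+#
+#     gen = generateTransactionalDatabase(10, 5, 10)
+#     gen.create()
+#     print(sum(len(line) for line in gen.db) / 10)  # 5.0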
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/generateLatexGraphFile.html b/sphinx/_build/html/_modules/PAMI/extras/generateLatexGraphFile.html new file mode 100644 index 000000000..932bad373 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/generateLatexGraphFile.html @@ -0,0 +1,223 @@ + + + + + + PAMI.extras.generateLatexGraphFile — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.generateLatexGraphFile

+# generateLatexGraphFile is used to convert the given data into a LaTeX graph file.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#     from PAMI.extras import generateLatexGraphFile as gen
+#
+#     gen.generateLatexCode(result)
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+import pandas as pd
+
+
+[docs]
+class generateLatexGraphFile():
+    """
+    :Description: generateLatexGraphFile is used to convert the given data into a LaTeX graph file.
+
+    **Importing this algorithm into a python program**
+    --------------------------------------------------------
+    .. code-block:: python
+
+        from PAMI.extras import generateLatexGraphFile as gen
+
+        gen.generateLatexCode(result)
+
+    """
+ + + + +
+[docs] +def generateLatexCode(result: pd.DataFrame) -> None: + + titles = result.columns.tolist() + titles.remove("minsup") + titles.remove("algorithm") + for i in range(0, len(titles)): + legendary = pd.unique(result[['algorithm']].values.ravel()) + color = ['red', 'blue', 'green', 'black', 'yellow'] + xaxis = result["minsup"].values.tolist() + yaxis = result[titles[i]].values.tolist() + algo = result["algorithm"].values.tolist() + x_label = "minsup" + filename = titles[i] + latexwriter = open(filename + "Latexfile.tex", "w") + latexwriter.write("") + latexwriter.write("\\begin{axis}[\n\txlabel={\\Huge{" + x_label + "}},") + latexwriter.write("\n\tylabel={\\Huge{" + titles[i] + "}},") + latexwriter.write("\n\txmin=" + str(min(xaxis)) + ", xmax=" + str(max(xaxis)) + ",") + + for num in range(0, len(legendary)): + latexwriter.write("\n\\addplot+ [" + color[num] + "]\n\tcoordinates {\n") + for num2 in range(0, len(xaxis)): + if (legendary[num] == algo[num2]): + latexwriter.write("(" + str(xaxis[num2]) + "," + str(yaxis[num2]) + ")\n") + latexwriter.write("\t}; \\addlegendentry{" + legendary[num] + "}\n") + if (num + 1 == len(legendary)): + latexwriter.write("\\end{axis}") + print("Latex files generated successfully")
+ + #data1 = pd.DataFrame(data) + #generateLatexCode(data1) + +if __name__ == "__main__": + + + #data = {'Name': ['Jai', 'Princi', 'Gaurav', 'Anuj'], + #'Age': [27, 24, 22, 32], + #'Address': [0, 1, 2, 3], + #'Qualification': [8, 9, 10, 11]} + '''data = {'algorithm': ['FGPFPMiner','FGPFPMiner','FGPFPMiner','FGPFPMiner','FGPFPMiner','FGPFPMiner','FGPFPMiner' + ,'Naive algorithm','Naive algorithm','Naive algorithm','Naive algorithm','Naive algorithm','Naive algorithm' + ,'Naive algorithm', ], + 'minsup': [200,400,600,800,1000,1200,1400,200,400,600,800,1000,1200,1400], + 'patterns': [25510,5826,2305,1163,657,407,266,101938,16183,5027,2091,1044,574,335], + 'runtime': [1077.7172002792358,298.6219701766968,186.86728835105896,126.96730422973633 + ,77.39371657371521,64.73982691764832,46.879486083984375,13175.030002832413,1821.2089745998383 + ,964.6961390972137,637.1588702201843,350.71105194091797,275.9953947067261,195.6615695953369], + 'memoryRSS': [164634624,159494144,157622272,156184576,153698304,150597632,149381120,228220928,192770048 + ,185114624,182939648,178253824,176115712,171659264], + 'memoryUSS': [144310272,139104256,137232384,135794688,133300224,130195456,128978944, + 203337728,172376064,164720640,162545664,157859840,155721728,151265280] + }''' + data = { + 'algorithm': ['FGPFPMiner', 'FGPFPMiner', 'FGPFPMiner', 'FGPFPMiner', 'FGPFPMiner', 'FGPFPMiner', 'FGPFPMiner'], + 'minsup': [200, 400, 600, 800, 1000, 1200, 1400], + 'patterns': [25510, 5826, 2305, 1163, 657, 407, 266], + 'runtime': [1077.7172002792358, 298.6219701766968, 186.86728835105896, 126.96730422973633 + , 77.39371657371521, 64.73982691764832, 46.879486083984375], + 'memoryRSS': [164634624, 159494144, 157622272, 156184576, 153698304, 150597632, 149381120], + 'memoryUSS': [144310272, 139104256, 137232384, 135794688, 133300224, 130195456, 128978944] + } + + data1 = pd.DataFrame(data) + #print(data1) + #print(data1['Name'].values.tolist()) + generateLatexCode(data1) +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/graph/DF2Fig.html b/sphinx/_build/html/_modules/PAMI/extras/graph/DF2Fig.html new file mode 100644 index 000000000..8d52b37e6 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/graph/DF2Fig.html @@ -0,0 +1,217 @@ + + + + + + PAMI.extras.graph.DF2Fig — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for PAMI.extras.graph.DF2Fig

+# dataFrameInToFigures is used to convert the given dataframe into figures.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#     from PAMI.extras.graph import DF2Fig as fig
+#
+#     obj = fig.DF2Fig(idf)
+#
+#     obj.plot("minSup", "patterns")
+#
+#     obj.plot("minSup", "memory")
+#
+#     obj.plot("minSup", "runtime")
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+     
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+import sys
+
+import plotly.express as _px
+import pandas as _pd
+
+
+[docs]
+class DF2Fig():
+    """
+    :Description: DF2Fig is used to convert the given dataframe into figures.
+
+    :param dataFrame:
+        Name of the input dataframe
+
+    :param algorithm:
+        Specify the column name containing the algorithms
+
+    :param xcolumn:
+        Specify the name of the X-axis
+
+    :param ycolumn:
+        Specify the name of the Y-axis
+
+    **Importing this algorithm into a python program**
+    --------------------------------------------------------
+    .. code-block:: python
+
+        from PAMI.extras.graph import DF2Fig as fig
+
+        obj = fig.DF2Fig(idf)
+
+        obj.plot("minSup", "patterns", "algorithm")
+
+        obj.plot("minSup", "memory")
+
+        obj.plot("minSup", "runtime")
+
+    """
+
+    def __init__(self, dataFrame: _pd.DataFrame) -> None:
+        self._dataFrame = dataFrame
+
+[docs] + def plot(self, xColumn, yColumn, algorithm=None) -> None: + """ + To plot graphs from given dataframe + + :param xColumn: Name of the X-axis of the dataframe + + :type xColumn: str + + :param yColumn: Name of the Y-axis of the dataframe + + :type yColumn: str + + :param algorithm: Specify the column name containing the algorithms + + :type algorithm: str + + :return: None + """ + if algorithm is None: + fig = _px.line(self._dataFrame, x=self._dataFrame[xColumn] , y=self._dataFrame[yColumn], color=self._dataFrame.iloc[:, 0], labels={'x': xColumn, 'y': yColumn}) + else: + fig = _px.line(self._dataFrame, x=self._dataFrame[xColumn], y=self._dataFrame[yColumn], + color=self._dataFrame[algorithm], labels={'x': xColumn, 'y': yColumn}) + + fig.show()
+
+ + + +# if __name__ == '__main__': +# ab = DF2Fig(result) +# # user can change x and y columns +# ab.plot("minSup", "patterns") + + +
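+
+# A minimal usage sketch with illustrative data:
+#
+#     import pandas as pd
+#     result = pd.DataFrame({'algorithm': ['FPGrowth'] * 3 + ['ECLAT'] * 3,
+#                            'minSup': [0.01, 0.02, 0.03] * 2,
+#                            'patterns': [386, 155, 60, 380, 150, 58]})
+#     obj = DF2Fig(result)
+#     obj.plot("minSup", "patterns", "algorithm")  # one line per algorithm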
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/graph/plotLineGraphFromDictionary.html b/sphinx/_build/html/_modules/PAMI/extras/graph/plotLineGraphFromDictionary.html new file mode 100644 index 000000000..2931b0eaf --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/graph/plotLineGraphFromDictionary.html @@ -0,0 +1,192 @@ + + + + + + PAMI.extras.graph.plotLineGraphFromDictionary — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.graph.plotLineGraphFromDictionary

+# plotLineGraphFromDictionary is used to draw a line graph from the given dictionary.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#     from PAMI.extras.graph import plotLineGraphFromDictionary as plt
+#
+#     obj = plt.plotLineGraphFromDictionary(idict, 100, 0, "title", "xlabel", "ylabel")
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+import sys
+import matplotlib.pyplot as plt
+
+
+[docs] +class plotLineGraphFromDictionary: + """ + This class plot graph of input data + + :Attributes: + + :param data : dict: store input data as dict + + :Methods: + + plotLineGraph() + draw line graph of input data. input data's key is x and value is y. + + **Importing this algorithm into a python program** + -------------------------------------------------------- + .. code-block:: python + + from PAMI.extras.graph import plotLineGraphFromDictionary as plt + + obj = plt.plotLineGraphFromDictionary(idict, 100, 0, " ") + + obj.save() + """ + def __init__(self, data: dict, end: int=100, start: int=0, title: str='', xlabel: str='', ylabel: str='') -> None: + + """ + draw line graph. Plot the input data key as x and value as y + + :param end: end of graph to plot + :type end: int + :param start: start fo graph to plot + :type start: int + :param title: title of graph + :type title: str + :param xlabel: xlabel of graph + :type xlabel: str + :param ylabel: ylabel of grapth + :type ylabel: str + :return: None + """ + end = int(len(data) * end / 100) + start = int(len(data) * start / 100) + x = list(range(len(data))) + y = list(tuple(data.values())[start:end]) + fig, ax = plt.subplots() + ax.plot(x, y, marker='.') + ax.set_title(title) + ax.set_xlabel(xlabel) + ax.set_ylabel(ylabel)
+ + +if __name__ == '__main__': + obj = plotLineGraphFromDictionary(sys.argv[1]) + obj.plotLineGraphFromDictionary(sys.argv[2]) +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/graph/plotLineGraphsFromDataFrame.html b/sphinx/_build/html/_modules/PAMI/extras/graph/plotLineGraphsFromDataFrame.html new file mode 100644 index 000000000..1d4213853 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/graph/plotLineGraphsFromDataFrame.html @@ -0,0 +1,207 @@ + + + + + + PAMI.extras.graph.plotLineGraphsFromDataFrame — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.graph.plotLineGraphsFromDataFrame

+# plotLineGraphsFromDataFrame is used to draw line graphs from the given dataframe.
+#
+#  **Importing this algorithm into a python program**
+#  --------------------------------------------------------
+#
+#     from PAMI.extras.graph import plotLineGraphsFromDataFrame as plt
+#
+#     obj = plt.plotGraphsFromDataFrame(idf)
+#
+#     obj.plotGraphsFromDataFrame()
+#
+
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+import matplotlib.pyplot as plt
+import pandas as _pd
+import sys
+
+
+[docs]
+class plotGraphsFromDataFrame():
+    """
+    plotGraphsFromDataFrame is used to convert the given dataframe into line graphs.
+
+    :Attributes:
+
+        :param dataFrame : DataFrame
+            store input data as DataFrame
+
+    :Methods:
+
+        plotGraphsFromDataFrame()
+            draw line graphs of the input data, with the minSup column as x and the patterns, runtime, and memory columns as y.
+
+    **Importing this algorithm into a python program**
+    --------------------------------------------------------
+    .. code-block:: python
+
+        from PAMI.extras.graph import plotLineGraphsFromDataFrame as plt
+
+        obj = plt.plotGraphsFromDataFrame(idf)
+
+        obj.plotGraphsFromDataFrame()
+    """
+
+    def __init__(self, dataFrame: _pd.DataFrame) -> None:
+
+        self._dataFrame = dataFrame
+
+[docs] + def plotGraphsFromDataFrame(self) -> None: + self._dataFrame.plot(x='minSup', y='patterns', kind='line') + plt.show() + print('Graph for No Of Patterns is successfully generated!') + self._dataFrame.plot(x='minSup', y='runtime', kind='line') + plt.show() + print('Graph for Runtime taken is successfully generated!') + self._dataFrame.plot(x='minSup', y='memory', kind='line') + plt.show() + print('Graph for memory consumption is successfully generated!')
+
+
+
+
+
+
+if __name__ == '__main__':
+    data = {
+        'algorithm': ['FPGrowth', 'FPGrowth', 'FPGrowth', 'FPGrowth', 'FPGrowth'],
+        'minSup': [0.01, 0.02, 0.03, 0.04, 0.05],
+        'patterns': [386, 155, 60, 36, 10],
+        'runtime': [7.351629, 4.658654, 4.658654, 1.946843, 1.909376],
+        'memory': [426545152, 309182464, 241397760, 225533952, 220950528]
+    }
+    dataFrame = _pd.DataFrame(data)
+    ab = plotGraphsFromDataFrame(dataFrame)
+    ab.plotGraphsFromDataFrame()
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/graph/visualizeFuzzyPatterns.html b/sphinx/_build/html/_modules/PAMI/extras/graph/visualizeFuzzyPatterns.html new file mode 100644 index 000000000..038234243 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/graph/visualizeFuzzyPatterns.html @@ -0,0 +1,251 @@ + + + + + + PAMI.extras.graph.visualizeFuzzyPatterns — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.graph.visualizeFuzzyPatterns

+# visualizeFuzzyPatterns is used to visualize the points produced by a pattern miner.
+#
+#  **Importing this algorithm into a python program**
+#  --------------------------------------------------------
+#
+#     from PAMI.extras.graph import visualizeFuzzyPatterns as viz
+#
+#     obj = viz.visualizeFuzzyPatterns(iFile, topk)
+#
+#     obj.visualize()
+#
+
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+# from PAMI.extras.graph import visualizePatterns as fig
+
+# obj = fig.visualizePatterns('soramame_frequentPatterns.txt',50)
+# obj.visualize(width=1000,height=900)
+
+import plotly.express as px
+import pandas as pd
+import sys
+
+
+
+[docs]
+class visualizeFuzzyPatterns():
+    """
+    :Description: visualizeFuzzyPatterns is used to visualize the points produced by a pattern miner.
+
+    :Attributes:
+
+        :param file: str :
+            store the input data as a file
+        :param topk: int :
+            number of top patterns to visualize
+
+    **Importing this algorithm into a python program**
+    --------------------------------------------------------
+    .. code-block:: python
+
+        from PAMI.extras.graph import visualizeFuzzyPatterns as viz
+
+        obj = viz.visualizeFuzzyPatterns(iFile, topk)
+
+        obj.visualize()
+    """
+
+    def __init__(self, file: str, topk: int) -> None:
+        self.file = file
+        self.topk = topk
+
+[docs] + def visualize(self, markerSize: int = 20, zoom: int = 3, width: int = 1500, height: int = 1000) -> None: + """ + Visualize points produced by pattern miner. + + :param markerSize: Size of the marker + :type markerSize: int + :param zoom: Zoom level + :type zoom: int + :param width: Width of the graph + :type width: int + :param height: Height of the graph on the screen + :type width: int + :return: None + """ + + long = [] + lat = [] + name = [] + color = [] + R = G = B = 0 + + lines = {} + with open(self.file, "r") as f: + for line in f: + lines[line] = len(line) + + lines = list(dict(sorted(lines.items(), key=lambda x: x[1])[-self.topk:]).keys()) + + start = 1 + + print("Number \t Pattern") + for line in lines: + + start += 1 + if start % 3 == 0: + R += 20 + if start % 3 == 1: + G += 20 + if start % 3 == 2: + B += 20 + if R > 255: + R = 0 + if G > 255: + G = 0 + if B > 255: + B = 0 + RHex = hex(R)[2:] + GHex = hex(G)[2:] + BHex = hex(B)[2:] + line = line.split(":") + freq = line[-1] + freq = "Frequency: " + freq.strip() + line = line[:-1] + print(str(start) + "\t" + line[0]) + points = line[0].split("\t") + points = [x for x in points if x != ""] + points = [x.strip("Point())") for x in points] + for i in range(len(points)): + rrr = points[i][8:29] + temp = rrr.split() + temp = [i.strip("()") for i in temp] + lat.append(float(temp[0])) + long.append(float(temp[1])) + name.append(freq) + color.append("#" + RHex + GHex + BHex) + df = pd.DataFrame({"lon": long, "lat": lat, "freq": name, "col": color}) + + fig = px.scatter_mapbox(df, lat="lon", lon="lat", hover_name="freq", color="col", zoom=zoom, width=width, + height=height) + fig.update_layout(mapbox_style="open-street-map") + fig.update_traces({'marker': {'size': markerSize}}) + fig.show()
+
+ + + +if __name__ == "__main__": + _ap = str() + _ap = visualizeFuzzyPatterns('soramame_frequentPatterns.txt', 10) + _ap.visualize() +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/graph/visualizePatterns.html b/sphinx/_build/html/_modules/PAMI/extras/graph/visualizePatterns.html new file mode 100644 index 000000000..7e601fcaa --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/graph/visualizePatterns.html @@ -0,0 +1,250 @@ + + + + + + PAMI.extras.graph.visualizePatterns — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.graph.visualizePatterns

+# visualizePatterns is used to visualize the points produced by a pattern miner.
+#
+#  **Importing this algorithm into a python program**
+#  --------------------------------------------------------
+#
+#     from PAMI.extras.graph import visualizePatterns as viz
+#
+#     obj = viz.visualizePatterns(iFile, topk)
+#
+#     obj.visualize()
+#
+
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+import plotly.express as px
+import pandas as pd
+import sys
+
+
+
+[docs]
+class visualizePatterns():
+    """
+    :Description: visualizePatterns is used to visualize the points produced by a pattern miner.
+
+    :Attributes:
+
+        :param file: str :
+            store the input data as a file
+        :param topk: int :
+            number of top patterns to visualize
+
+    **Importing this algorithm into a python program**
+    --------------------------------------------------------
+    .. code-block:: python
+
+        from PAMI.extras.graph import visualizePatterns as viz
+
+        obj = viz.visualizePatterns(iFile, topk)
+
+        obj.visualize()
+
+    """
+
+    def __init__(self, file: str, topk: int) -> None:
+        self.file = file
+        self.topk = topk
+
+[docs] + def visualize(self, markerSize: int = 20, zoom: int = 3, width: int = 1500, height: int = 1000) -> None: + """ + Visualize points produced by pattern miner. + + :param markerSize: Size of the marker + :type markerSize: int + :param zoom: Zoom level + :type zoom: int + :param width: Width of the graph + :type width: int + :param height: Height of the graph on the screen + :type width: int + :return: None + """ + + long = [] + lat = [] + name = [] + color = [] + R = G = B = 0 + + lines = {} + with open(self.file, "r") as f: + for line in f: + lines[line] = len(line) + + lines = list(dict(sorted(lines.items(), key=lambda x: x[1])[-self.topk:]).keys()) + + start = 1 + + print("Number \t Pattern") + for line in lines: + + start += 1 + if start % 3 == 0: + R += 20 + if start % 3 == 1: + G += 20 + if start % 3 == 2: + B += 20 + if R > 255: + R = 0 + if G > 255: + G = 0 + if B > 255: + B = 0 + RHex = hex(R)[2:] + GHex = hex(G)[2:] + BHex = hex(B)[2:] + + line = line.split(":") + freq = line[-1] + freq = "Frequency: " + freq.strip() + line = line[:-1] + print(str(start) + "\t" + line[0]) + points = line[0].split("\t") + points = [x for x in points if x != ""] + points = [x.strip("Point()") for x in points] + for i in range(len(points)): + temp = points[i].split() + lat.append(float(temp[0])) + long.append(float(temp[1])) + name.append(freq) + color.append("#" + RHex + GHex + BHex) + df = pd.DataFrame({"lon": long, "lat": lat, "freq": name, "col": color}) + + fig = px.scatter_mapbox(df, lat="lon", lon="lat", hover_name="freq", color="col", zoom=zoom, width=width, + height=height) + fig.update_layout(mapbox_style="open-street-map") + fig.update_traces({'marker': {'size': markerSize}}) + fig.show()
+
+
+
+
+if __name__ == "__main__":
+    # topk must be an integer for the list slicing performed in visualize()
+    _ap = visualizePatterns(sys.argv[1], int(sys.argv[2]))
+    _ap.visualize()
+
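+
+# The parser above expects each input line to hold tab-separated points
+# followed by ":frequency"; an illustrative pattern-file line (coordinates
+# are hypothetical, fields are tab-separated):
+#
+#     Point(35.68 139.76)	Point(34.69 135.50)	:42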
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/imageProcessing/imagery2Databases.html b/sphinx/_build/html/_modules/PAMI/extras/imageProcessing/imagery2Databases.html new file mode 100644 index 000000000..85c0b8707 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/imageProcessing/imagery2Databases.html @@ -0,0 +1,293 @@ + + + + + + PAMI.extras.imageProcessing.imagery2Databases — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.imageProcessing.imagery2Databases

+# imagery2Databases is used to create a transactional database by applying a threshold.
+#
+#  **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#     from PAMI.extras.imageProcessing import imagery2Databases as db
+#
+#     obj = db.createDatabase(detected_objects, 16)
+#
+#     obj.saveAsTransactionalDB(oFile, sep)
+#
+
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+import pandas as pd
+import sys
+
+
+# creating transactional database by applying threshold
+
+[docs]
+class createDatabase:
+    """
+    :Description: imagery2Databases is used to create a transactional database by applying a threshold
+
+    :param detected_objects: list :
+        List data to be processed
+    :param threshold: float :
+        Minimum probability score an object must maintain
+
+    **Importing this algorithm into a python program**
+    --------------------------------------------------------
+    .. code-block:: python
+
+        from PAMI.extras.imageProcessing import imagery2Databases as db
+
+        obj = db.createDatabase(detected_obj, 16)
+
+        obj.saveAsTransactionalDB(oFile, sep)
+
+    """
+
+    # pass the list of detected objects and specify the minimum probability score an object must maintain.
+    def __init__(self, detected_objects: list, threshold: float):
+        # initialize data frame to store objects
+        self.dataframe = pd.DataFrame(columns=['objects'])
+        self.threshold = threshold
+        self.itemList = []
+        self.probabilityValuesList = []
+        self.detected_objects = detected_objects
+        self.itemSupport = []
+        self.itemSupportSum = []
+        # prune the objects having scores less than the threshold value
+        for objectList in self.detected_objects:
+            supportSum = 0
+            dataDic = {}
+            self.items = []
+            self.values = []
+            self.supports = []
+            for item in objectList:
+                supportSum = supportSum + 1
+                if item[1] >= self.threshold:
+                    if item[0] not in dataDic.keys():
+                        dataDic[item[0]] = [item[1]]
+                    else:
+                        dataDic[item[0]].append(item[1])
+            # storing objects, their probabilities and count
+            self.items = [item for item in dataDic.keys()]
+            self.values = [max(value) for value in dataDic.values()]
+            self.supports = [len(value) for value in dataDic.values()]
+
+            self.itemSupportSum.append(supportSum)
+            self.itemList.append(self.items)
+            self.probabilityValuesList.append(self.values)
+            self.itemSupport.append(self.supports)
+            self.dataframe.loc[self.dataframe.shape[0], 'objects'] = dataDic.keys()
+
+[docs] + def getDataFrame(self) -> pd.DataFrame: + return self.dataframe
+ + + # This function will save the list of objects found in each image as a transactional database. + + # creating transactional database +
+[docs] + def saveAsTransactionalDB(self, outputFile: str, sep: str) -> None: + writeFile = open(outputFile, 'w') + for i in range(len(self.itemList)): + if self.itemList[i]: + writeLine = sep.join(map(str, self.itemList[i])) + writeFile.write(writeLine + '\n') + writeFile.close()
+ + + # creating temporal database +
+[docs] + def saveAsTemporalDB(self, outputFile: str, sep: str): + writeFile = open(outputFile, 'w') + + for i in range(len(self.itemList)): + if self.itemList[i]: + writeLine = sep.join(map(str, self.itemList[i])) + writeFile.write(str(i) + sep + writeLine + '\n') + + writeFile.close()
+ + + # creating utility transactional database + +
+[docs] + def saveAsUtilityTransactionalDB(self, outputFile: str, sep: str) -> None: + writeFile = open(outputFile, 'w') + for i in range(len(self.itemList)): + if self.itemList[i]: + writeLine = sep.join(map(str, self.itemList[i])) + writeLine2 = sep.join(map(str, self.itemSupport[i])) + writeFile.write(writeLine + ':' + str(self.itemSupportSum[i]) + ':' + writeLine2 + '\n') + writeFile.close()
+ + + # creating utility temporal database + +
+[docs] + def saveAsUtilityTemporalDB(self, outputFile: str, sep: str) -> None: + writeFile = open(outputFile, 'w') + for i in range(len(self.itemList)): + if self.itemList[i]: + writeLine = sep.join(map(str, self.itemList[i])) + writeLine2 = sep.join(map(str, self.itemSupport[i])) + writeFile.write( + str(i) + str(sep) + writeLine + ':' + str(self.itemSupportSum[i]) + ':' + writeLine2 + '\n') + writeFile.close()
+ + + # creating uncertain transactional database + +
+[docs] + def saveAsUncertainTransactionalDB(self, outputFile: str, sep: str) -> None: + writeFile = open(outputFile, 'w') + for i in range(len(self.itemList)): + if self.itemList[i]: + writeLine = sep.join(map(str, self.itemList[i])) + writeLine2 = sep.join(map(str, self.probabilityValuesList[i])) + writeFile.write(writeLine + ":1:" + writeLine2 + '\n') + writeFile.close()
+ + + # creating uncertain Temporal database + +
+[docs] + def saveAsUncertainTemporalDB(self, outputFile: str, sep: str) -> None: + writeFile = open(outputFile, 'w') + for i in range(len(self.itemList)): + if self.itemList[i]: + writeLine = sep.join(map(str, self.itemList[i])) + writeLine2 = sep.join(map(str, self.probabilityValuesList[i])) + writeFile.write(str(i) + str(sep) + writeLine + ":1:" + writeLine2 + '\n') + writeFile.close()
+
+
+
+if __name__ == '__main__':
+    obj = createDatabase(sys.argv[1], float(sys.argv[2]))
+    # saveAsUncertainTemporalDB requires a separator as its second argument
+    obj.saveAsUncertainTemporalDB(sys.argv[3], '\t')
+
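+
+# A minimal usage sketch (not part of the module). `detections` below is a
+# hypothetical list of (label, probability) pairs per image, e.g. the output
+# of an object detector; the names and values are illustrative only:
+#
+#     from PAMI.extras.imageProcessing import imagery2Databases as db
+#
+#     detections = [[('car', 0.92), ('dog', 0.41)], [('car', 0.83), ('bus', 0.77)]]
+#     obj = db.createDatabase(detections, 0.5)
+#     obj.saveAsTransactionalDB('transactions.txt', '\t')
+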
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/messaging/discord.html b/sphinx/_build/html/_modules/PAMI/extras/messaging/discord.html new file mode 100644 index 000000000..82b68a25b --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/messaging/discord.html @@ -0,0 +1,130 @@ + + + + + + PAMI.extras.messaging.discord — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.messaging.discord

+
+# Create WebHook for the Discord Channel: https://support.discord.com/hc/en-us/articles/228383668-Intro-to-Webhooks
+
+# Copy the webhook URL and pass it to discord.SyncWebhook.from_url
+
+# Send messages
+
+from discord import SyncWebhook
+
+
+
+[docs] +class discord(): + + def __init__(self, url: str) -> None: + self.url = url + + +
+[docs] + def send(self, message: str) -> None: + try: + webhook = SyncWebhook.from_url(self.url) + webhook.send(message) + except Exception as e: + # Print any error messages to stdout + print(e)
+
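+
+# A minimal usage sketch (assumed webhook URL; not part of the module):
+#
+#     from PAMI.extras.messaging import discord as msg
+#
+#     hook = msg.discord("https://discord.com/api/webhooks/<id>/<token>")
+#     hook.send("Pattern mining run completed")
+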
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/messaging/gmail.html b/sphinx/_build/html/_modules/PAMI/extras/messaging/gmail.html new file mode 100644 index 000000000..08dc07710 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/messaging/gmail.html @@ -0,0 +1,157 @@ + + + + + + PAMI.extras.messaging.gmail — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for PAMI.extras.messaging.gmail

+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+import sys
+import smtplib, ssl
+
+
+from email.message import EmailMessage
+
+
+[docs] +class gmail(): + + def __init__(self, userName: str, password: str) -> None: + self.userName = userName + self.password = password + + +
+[docs] + def send(self, toAddress: str, subject: str, body: str) -> None: + smtp_server = smtplib.SMTP('smtp.gmail.com', 587) + try: + + smtp_server.starttls() + smtp_server.login(self.userName, self.password) + + message = EmailMessage() + message.set_content(body) + + message['Subject'] = subject + message['From'] = self.userName + message['To'] = toAddress + + smtp_server.send_message(message) + except Exception as e: + # Print any error messages to stdout + print(e) + finally: + smtp_server.quit()
+
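+
+# A minimal usage sketch (assumed credentials; a Gmail "app password" is
+# typically required when two-factor authentication is enabled):
+#
+#     from PAMI.extras.messaging import gmail as mail
+#
+#     sender = mail.gmail("user@gmail.com", "<app-password>")
+#     sender.send("to@example.com", "Mining finished", "All patterns were saved.")
+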
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/neighbours/findNeighborsUsingEuclideanDistanceforPointInfo.html b/sphinx/_build/html/_modules/PAMI/extras/neighbours/findNeighborsUsingEuclideanDistanceforPointInfo.html new file mode 100644 index 000000000..631ac082d --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/neighbours/findNeighborsUsingEuclideanDistanceforPointInfo.html @@ -0,0 +1,232 @@ + + + + + + PAMI.extras.neighbours.findNeighborsUsingEuclideanDistanceforPointInfo — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.neighbours.findNeighborsUsingEuclideanDistanceforPointInfo

+# findNeighborsUsingEuclideanDistanceforPointInfo is a code used to create a neighbourhood file using Euclidean distance.
+#
+#  **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#     from PAMI.extras.neighbours import findNeighborsUsingEuclideanDistanceforPointInfo as db
+#
+#     obj = db.createNeighborhoodFileUsingEuclideanDistance(iFile, oFile, 10, "\t")
+#
+#     obj.getFileName()
+#
+
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+import sys
+import re
+from math import sqrt
+
+
+
+[docs]
+class createNeighborhoodFileUsingEuclideanDistance:
+    """
+    This class creates a neighbourhood file using Euclidean distance. The
+    neighbourhood computation is performed in the constructor.
+
+    :Attribute:
+
+        :param iFile : file
+            Input file name or path of the input file
+        :param oFile : file
+            Output file name or path of the output file
+        :param maxEucledianDistance : int
+            The user can specify maxEucledianDistance.
+            This program finds pairs of values whose Euclidean distance is less than or equal to maxEucledianDistance
+            and stores the pairs.
+        :param seperator: str :
+            This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator.
+
+    :Methods:
+
+        getFileName()
+            This function returns the output file name.
+
+    **Importing this algorithm into a python program**
+    --------------------------------------------------------
+    .. code-block:: python
+
+            from PAMI.extras.neighbours import findNeighborsUsingEuclideanDistanceforPointInfo as db
+
+            obj = db.createNeighborhoodFileUsingEuclideanDistance(iFile, oFile, 10, "\t")
+
+            obj.getFileName()
+    """
+
+    def __init__(self, iFile: str, oFile: str, maxEucledianDistance: int, seperator='\t') -> None:
+        self.iFile = iFile
+        self.oFile = oFile
+        self.maxEucledianDistance = maxEucledianDistance
+        self.uniqueItems = []
+        coordinates = []
+        result = {}
+        with open(self.iFile, "r") as f:
+            for line in f:
+                l = [i for i in line.split(":")]
+                l[0] = l[0].rstrip().split(seperator)
+                l[0] = [i for i in l[0] if i]
+                for i in l[0]:
+                    # drop a trailing occupancy flag such as "1" if present
+                    if i[-1] == '1':
+                        i = i[0:-2]
+                    if i not in self.uniqueItems:
+                        self.uniqueItems.append(i)
+        for i in self.uniqueItems:
+            i = i.strip("Point()")
+            coordinates.append(i.rstrip().split())
+        # print("Total number of unique coordinates in the given file:", len(coordinates))
+        for i in range(len(coordinates)):
+            for j in range(len(coordinates)):
+                if i != j:
+                    firstCoordinate = coordinates[i]
+                    secondCoordinate = coordinates[j]
+                    x1 = float(firstCoordinate[0])
+                    y1 = float(firstCoordinate[1])
+                    x2 = float(secondCoordinate[0])
+                    y2 = float(secondCoordinate[1])
+                    ansX = x2 - x1
+                    ansY = y2 - y1
+                    # Euclidean distance is sqrt((x2-x1)^2 + (y2-y1)^2); the squared
+                    # differences must be added, not subtracted
+                    dist = pow(ansX, 2) + pow(ansY, 2)
+                    norm = sqrt(dist)
+                    if norm <= float(self.maxEucledianDistance):
+                        result[tuple(firstCoordinate)] = result.get(tuple(firstCoordinate), [])
+                        result[tuple(firstCoordinate)].append(secondCoordinate)
+
+        with open(self.oFile, "w+") as f:
+            for i in result:
+                string = "Point(" + i[0] + " " + i[1] + ")" + seperator
+                f.write(string)
+                for j in result[i]:
+                    string = "Point(" + j[0] + " " + j[1] + ")" + seperator
+                    f.write(string)
+                f.write("\n")
+
+[docs] + def getFileName(self) -> str: + return self.oFile
+
+ + +if __name__ == "__main__": + obj = createNeighborhoodFileUsingEuclideanDistance(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4]) +
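+
+# A minimal usage sketch (hypothetical file names). Each input line is expected
+# to hold "Point(x y)" items separated by the chosen separator, optionally
+# followed by a ":"-separated payload:
+#
+#     from PAMI.extras.neighbours import findNeighborsUsingEuclideanDistanceforPointInfo as db
+#
+#     obj = db.createNeighborhoodFileUsingEuclideanDistance("points.txt", "neighbours.txt", 10, "\t")
+#     print(obj.getFileName())
+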
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/neighbours/findNeighboursUsingEuclidean.html b/sphinx/_build/html/_modules/PAMI/extras/neighbours/findNeighboursUsingEuclidean.html new file mode 100644 index 000000000..a8c90e5b5 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/neighbours/findNeighboursUsingEuclidean.html @@ -0,0 +1,224 @@ + + + + + + PAMI.extras.neighbours.findNeighboursUsingEuclidean — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.neighbours.findNeighboursUsingEuclidean

+# findNeighboursUsingEuclidean is a code used to create a neighbourhood file using Euclidean distance.
+#
+#  **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#     from PAMI.extras.neighbours import findNeighboursUsingEuclidean as db
+#
+#     obj = db.createNeighborhoodFileUsingEuclideanDistance(iFile, oFile, 10, "\t")
+#
+#     obj.getFileName()
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+import sys
+import re
+from math import sqrt
+
+
+
+[docs]
+class createNeighborhoodFileUsingEuclideanDistance:
+    """
+    This class creates a neighbourhood file using Euclidean distance. The
+    neighbourhood computation is performed in the constructor.
+
+    :Attribute:
+
+        :param iFile : file
+            Input file name or path of the input file
+        :param oFile : file
+            Output file name or path of the output file
+        :param maxEucledianDistance : int
+            The user can specify maxEucledianDistance.
+            This program finds pairs of values whose Euclidean distance is less than or equal to maxEucledianDistance
+            and stores the pairs.
+        :param seperator: str :
+            This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator.
+
+    :Methods:
+
+        getFileName()
+            This function returns the output file name.
+
+    **Importing this algorithm into a python program**
+    --------------------------------------------------------
+    .. code-block:: python
+
+            from PAMI.extras.neighbours import findNeighboursUsingEuclidean as db
+
+            obj = db.createNeighborhoodFileUsingEuclideanDistance(iFile, oFile, 10, "\t")
+
+            obj.getFileName()
+    """
+
+    def __init__(self, iFile: str, oFile: str, maxEucledianDistance: int, seperator='\t') -> None:
+        self.iFile = iFile
+        self.oFile = oFile
+        self.maxEucledianDistance = maxEucledianDistance
+
+        coordinates = []
+        result = {}
+        with open(self.iFile, "r") as f:
+            for line in f:
+                l = line.rstrip().split(seperator)
+                # keep only digits, dots and spaces from the point description
+                l[0] = re.sub(r'[^0-9. ]', '', l[0])
+                coordinates.append(l[0].rstrip().split(' '))
+        for i in range(len(coordinates)):
+            for j in range(len(coordinates)):
+                if i != j:
+                    firstCoordinate = coordinates[i]
+                    secondCoordinate = coordinates[j]
+                    x1 = float(firstCoordinate[0])
+                    y1 = float(firstCoordinate[1])
+                    x2 = float(secondCoordinate[0])
+                    y2 = float(secondCoordinate[1])
+                    ansX = x2 - x1
+                    ansY = y2 - y1
+                    # Euclidean distance is sqrt((x2-x1)^2 + (y2-y1)^2); the squared
+                    # differences must be added, not subtracted
+                    dist = pow(ansX, 2) + pow(ansY, 2)
+                    norm = sqrt(dist)
+                    if norm <= float(self.maxEucledianDistance):
+                        result[tuple(firstCoordinate)] = result.get(tuple(firstCoordinate), [])
+                        result[tuple(firstCoordinate)].append(secondCoordinate)
+
+        with open(self.oFile, "w+") as f:
+            for i in result:
+                string = "Point(" + i[0] + " " + i[1] + ")" + seperator
+                f.write(string)
+                for j in result[i]:
+                    string = "Point(" + j[0] + " " + j[1] + ")" + seperator
+                    f.write(string)
+                f.write("\n")
+
+[docs] + def getFileName(self) -> str: + return self.oFile
+
+ + +if __name__ == "__main__": + obj = createNeighborhoodFileUsingEuclideanDistance(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4]) +
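+
+# A minimal worked example of the corrected distance test above (not part of
+# the module): for points (0, 0) and (3, 4), sqrt((3-0)^2 + (4-0)^2) = 5.0, so
+# the pair is written out whenever maxEucledianDistance >= 5:
+#
+#     from math import sqrt
+#     assert sqrt(pow(3 - 0, 2) + pow(4 - 0, 2)) == 5.0
+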
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/neighbours/findNeighboursUsingGeodesic.html b/sphinx/_build/html/_modules/PAMI/extras/neighbours/findNeighboursUsingGeodesic.html new file mode 100644 index 000000000..99be46859 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/neighbours/findNeighboursUsingGeodesic.html @@ -0,0 +1,225 @@ + + + + + + PAMI.extras.neighbours.findNeighboursUsingGeodesic — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.neighbours.findNeighboursUsingGeodesic

+# findNeighboursUsingGeodesic is a code used to create a neighbourhood file using Geodesic distance.
+#
+#  **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#     from PAMI.extras.neighbours import findNeighboursUsingGeodesic as db
+#
+#     obj = db.createNeighborhoodFileUsingGeodesicDistance(iFile, oFile, 10, "\t")
+#
+#     obj.getFileName()
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+import sys
+import re
+from math import sqrt
+from geopy.distance import geodesic
+
+
+
+
+[docs]
+class createNeighborhoodFileUsingGeodesicDistance:
+    """
+    This class creates a neighbourhood file using Geodesic distance. The
+    neighbourhood computation is performed in the constructor.
+
+    :Attribute:
+
+        :param iFile : file
+            Input file name or path of the input file
+        :param oFile : file
+            Output file name or path of the output file
+        :param maxDistance : float
+            The user can specify maxDistance in Km (kilometers).
+            This program finds pairs of values whose Geodesic distance is less than or equal to maxDistance
+            and stores the pairs.
+        :param seperator: str :
+            This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator.
+
+    :Methods:
+
+        getFileName()
+            This function returns the output file name.
+
+    **Importing this algorithm into a python program**
+    --------------------------------------------------------
+    .. code-block:: python
+
+            from PAMI.extras.neighbours import findNeighboursUsingGeodesic as db
+
+            obj = db.createNeighborhoodFileUsingGeodesicDistance(iFile, oFile, 10, "\t")
+
+            obj.getFileName()
+    """
+
+    def __init__(self, iFile: str, oFile: str, maxDistance: float, seperator='\t'):
+        self.iFile = iFile
+        self.oFile = oFile
+        self.maxDistance = maxDistance
+
+        coordinates = []
+        result = {}
+        with open(self.iFile, "r") as f:
+            for line in f:
+                l = line.rstrip().split(seperator)
+                # keep only digits, dots and spaces from the point description
+                l[2] = re.sub(r'[^0-9. ]', '', l[2])
+                coordinates.append(l[2].rstrip().split(' '))
+        for i in range(len(coordinates)):
+            for j in range(len(coordinates)):
+                if i != j:
+                    firstCoordinate = coordinates[i]
+                    secondCoordinate = coordinates[j]
+                    long1 = float(firstCoordinate[0])
+                    lat1 = float(firstCoordinate[1])
+                    long2 = float(secondCoordinate[0])
+                    lat2 = float(secondCoordinate[1])
+
+                    dist = geodesic((lat1, long1), (lat2, long2)).kilometers
+
+                    if dist <= float(self.maxDistance):
+                        result[tuple(firstCoordinate)] = result.get(tuple(firstCoordinate), [])
+                        result[tuple(firstCoordinate)].append(secondCoordinate)
+
+        with open(self.oFile, "w+") as f:
+            for i in result:
+                string = "Point(" + i[0] + " " + i[1] + ")" + seperator
+                f.write(string)
+                for j in result[i]:
+                    string = "Point(" + j[0] + " " + j[1] + ")" + seperator
+                    f.write(string)
+                f.write("\n")
+
+[docs] + def getFileName(self): + return self.oFile
+
+
+
+if __name__ == "__main__":
+    # the third positional argument (maxDistance) was previously skipped
+    obj = createNeighborhoodFileUsingGeodesicDistance(sys.argv[1], sys.argv[2], float(sys.argv[3]), sys.argv[4])
+
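+
+# A minimal usage sketch (hypothetical file names; distances are compared in
+# kilometers, matching geopy's geodesic(...).kilometers above):
+#
+#     from PAMI.extras.neighbours import findNeighboursUsingGeodesic as db
+#
+#     obj = db.createNeighborhoodFileUsingGeodesicDistance("points.txt", "neighbours.txt", 5.0, "\t")
+#     print(obj.getFileName())
+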
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/plotPointOnMap.html b/sphinx/_build/html/_modules/PAMI/extras/plotPointOnMap.html new file mode 100644 index 000000000..f361c3d1b --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/plotPointOnMap.html @@ -0,0 +1,242 @@ + + + + + + PAMI.extras.plotPointOnMap — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for PAMI.extras.plotPointOnMap

+# plotPointOnMap is used to take the input patterns and plot the points on a map.
+#
+#     **Importing this algorithm into a python program**
+#     --------------------------------------------------------
+#
+#     from PAMI.extras import plotPointOnMap as plt
+#
+#     obj = plt.plotPointOnMap(" ", 10, "\t")
+#
+#     mmap = obj.plotPointInMap()
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+import folium
+import pandas as pd
+import re
+from typing import List, Tuple
+
+
+
+[docs]
+class plotPointOnMap:
+    """
+    :Description: plotPointOnMap is used to take the input patterns and plot the points on a map
+
+    :param inputPatterns: str :
+        Name of the input pattern file
+    :param k: int :
+        Number of top (longest) patterns to plot. The default value is 10.
+    :param sep: str :
+        This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator.
+
+    **Importing this algorithm into a python program**
+    --------------------------------------------------------
+    .. code-block:: python
+
+            from PAMI.extras import plotPointOnMap as plt
+
+            obj = plt.plotPointOnMap(" ", 10, "\t")
+
+            mmap = obj.plotPointInMap()
+
+    """
+
+    def __init__(self, inputPatterns: str, k: int=10, sep: str='\t') ->None:
+        self.inputPatterns = inputPatterns
+        self.k = k
+        self.sep = sep
+
+
+[docs]
+    def findTopKPatterns(self) -> List[List[str]]:
+        Database = []
+        if isinstance(self.inputPatterns, pd.DataFrame):
+            patterns = []
+            i = self.inputPatterns.columns.values.tolist()
+            # the 'Patterns' column is the one actually read below
+            if 'Patterns' in i:
+                patterns = self.inputPatterns['Patterns'].tolist()
+            for pattern in patterns:
+                if isinstance(pattern, str):
+                    pattern = [item for item in pattern.strip().split(self.sep)]
+                Database.append(pattern)
+        elif isinstance(self.inputPatterns, dict):
+            for pattern in self.inputPatterns:
+                if isinstance(pattern, str):
+                    pattern = [item for item in pattern.strip().split(self.sep)]
+                Database.append(pattern)
+
+        elif isinstance(self.inputPatterns, str):
+            with open(self.inputPatterns, 'r') as f:
+                for line in f:
+                    pattern = [s for s in line.strip().split(':')][0]
+                    pattern = [item for item in pattern.strip().split(self.sep)]
+                    Database.append(pattern)
+
+        # rank patterns by their length and keep the k longest
+        patterns = sorted(Database, key=lambda x: len(x), reverse=True)
+        return patterns[:self.k]
+
+ + +
+
+[docs]
+    def convertPOINT(self, patterns: List[List[str]]) -> pd.DataFrame:
+        locations = pd.DataFrame(columns=['patternId', 'latitude', 'longitude'])
+        patternId = 1
+        for pattern in patterns:
+            for item in pattern:
+                location = item.split(' ')
+                latitude = re.sub('[^0-9. ]', '', location[0])
+                longitude = re.sub('[^0-9. ]', '', location[1])
+                df = pd.DataFrame([patternId, latitude, longitude], index=locations.columns).T
+                # DataFrame.append was removed in pandas 2.0; pd.concat is the supported equivalent
+                locations = pd.concat([locations, df], ignore_index=True)
+            patternId += 1
+        return locations
+
+ + + + +
+[docs] + def plotPointInMap(self) -> folium.Map: + topKPatterns = self.findTopKPatterns() + df = self.convertPOINT(topKPatterns) + mmap = folium.Map(location=[35.39, 139.44], zoom_start=4) + # df = pd.read_csv(inputFile) + colors = ['red', 'blue', 'green', 'purple', 'orange', 'darkred', 'beige', 'darkblue', 'darkgreen', + 'cadetblue', 'darkpurple', 'white', 'pink','gray', 'black'] + for i, row in df.iterrows(): + mmap.add_child(folium.CircleMarker( + location=[row['latitude'], row['longitude']], + popup=row['patternId'], + radius=3, + color=colors[int(row['patternId']) - 1], + fill=True, + fill_color=colors[int(row['patternId']) - 1], + )) + return mmap
+
+ + +if __name__ == '__main__': + obj = plotPointOnMap('visualizePatterns.csv') + mmap = obj.plotPointInMap() + mmap +
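+
+# A minimal usage sketch (hypothetical pattern file; each pattern line is
+# expected to hold "lat long" items separated by the given separator,
+# optionally followed by a ":"-separated suffix such as the support):
+#
+#     from PAMI.extras import plotPointOnMap as pm
+#
+#     obj = pm.plotPointOnMap("patterns.txt", 10, "\t")
+#     mmap = obj.plotPointInMap()
+#     mmap.save("topKPatterns.html")
+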
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/plotPointOnMap_dump.html b/sphinx/_build/html/_modules/PAMI/extras/plotPointOnMap_dump.html new file mode 100644 index 000000000..e3059df3d --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/plotPointOnMap_dump.html @@ -0,0 +1,240 @@ + + + + + + PAMI.extras.plotPointOnMap_dump — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.plotPointOnMap_dump

+# plotPointOnMap_dump is used to take the input patterns and plot the points on a map.
+#
+#     **Importing this algorithm into a python program**
+#     --------------------------------------------------------
+#
+#     from PAMI.extras import plotPointOnMap_dump as plt
+#
+#     obj = plt.plotPointOnMap(" ", 10, "\t")
+#
+#     mmap = obj.plotPointInMap()
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+import folium
+import pandas as pd
+from typing import Dict, List
+import re
+
+
+
+[docs]
+class plotPointOnMap:
+    """
+    :Description: plotPointOnMap is used to take the input patterns and plot the points on a map
+
+    :param inputPatterns: str : Name of the input pattern file
+    :param k: int : Number of top (longest) patterns to plot. The default value is 10.
+    :param sep: str : This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator.
+
+    **Importing this algorithm into a python program**
+    --------------------------------------------------------
+    .. code-block:: python
+
+            from PAMI.extras import plotPointOnMap_dump as plt
+
+            obj = plt.plotPointOnMap(" ", 10, "\t")
+
+            mmap = obj.plotPointInMap()
+
+    """
+    def __init__(self, inputPatterns: str, k: int=10, sep: str='\t'):
+        self.inputPatterns = inputPatterns
+        self.k = k
+        self.sep = sep
+
+[docs] + def findTopKPatterns(self) -> List[List[str]]: + Database = [] + if isinstance(self.inputPatterns, pd.DataFrame): + patterns = [] + i = self.inputPatterns.columns.values.tolist() + if 'Transactions' in i: + patterns = self.inputPatterns['Patterns'].tolist() + for pattern in patterns: + if isinstance(pattern, str): + pattern = [item for item in pattern.strip().split(self.sep)] + Database.append(pattern) + elif isinstance(self.inputPatterns, dict): + for pattern in self.inputPatterns: + if isinstance(pattern, str): + pattern = [item for item in pattern.strip().split(self.sep)] + Database.append(pattern) + + elif isinstance(self.inputPatterns, str): + with open(self.inputPatterns, 'r') as f: + for line in f: + pattern = [s for s in line.strip().split(':')][0] + pattern = [item for item in pattern.strip().split(self.sep)] + Database.append(pattern) + + patterns = sorted(Database, key=lambda x: len(x[0]), reverse=True) + # return {patternId: patterns[patternId - 1] for patternId in range(1, int(self.k) + 1)} + return patterns[:self.k]
+
+[docs]
+    def findTopKPatterns(self) -> List[List[str]]:
+        Database = []
+        if isinstance(self.inputPatterns, pd.DataFrame):
+            patterns = []
+            i = self.inputPatterns.columns.values.tolist()
+            # the 'Patterns' column is the one actually read below
+            if 'Patterns' in i:
+                patterns = self.inputPatterns['Patterns'].tolist()
+            for pattern in patterns:
+                if isinstance(pattern, str):
+                    pattern = [item for item in pattern.strip().split(self.sep)]
+                Database.append(pattern)
+        elif isinstance(self.inputPatterns, dict):
+            for pattern in self.inputPatterns:
+                if isinstance(pattern, str):
+                    pattern = [item for item in pattern.strip().split(self.sep)]
+                Database.append(pattern)
+
+        elif isinstance(self.inputPatterns, str):
+            with open(self.inputPatterns, 'r') as f:
+                for line in f:
+                    pattern = [s for s in line.strip().split(':')][0]
+                    pattern = [item for item in pattern.strip().split(self.sep)]
+                    Database.append(pattern)
+
+        # rank patterns by their length (not the length of their first item), as in plotPointOnMap
+        patterns = sorted(Database, key=lambda x: len(x), reverse=True)
+        return patterns[:self.k]
+
+[docs] + def convertPOINT(self, patterns: List[List[str]]) -> pd.DataFrame: + locations = pd.DataFrame(columns=['patternId', 'latitude', 'longitude']) + patternId = 1 + for pattern in patterns: + for item in pattern: + location = item.split(' ') + longitude = re.sub('[^0-9. ]', '', location[0]) + latitude = re.sub('[^0-9. ]', '', location[1]) + df = pd.DataFrame([patternId, latitude, longitude], index=locations.columns).T + locations = locations.append(df, ignore_index=True) + patternId += 1 + return locations
+
+[docs]
+    def convertPOINT(self, patterns: List[List[str]]) -> pd.DataFrame:
+        locations = pd.DataFrame(columns=['patternId', 'latitude', 'longitude'])
+        patternId = 1
+        for pattern in patterns:
+            for item in pattern:
+                location = item.split(' ')
+                longitude = re.sub('[^0-9. ]', '', location[0])
+                latitude = re.sub('[^0-9. ]', '', location[1])
+                df = pd.DataFrame([patternId, latitude, longitude], index=locations.columns).T
+                # DataFrame.append was removed in pandas 2.0; pd.concat is the supported equivalent
+                locations = pd.concat([locations, df], ignore_index=True)
+            patternId += 1
+        return locations
+
+[docs] + def plotPointInMap(self) -> folium.Map: + topKPatterns = self.findTopKPatterns() + df = self.convertPOINT(topKPatterns) + mmap = folium.Map(location=[35.39, 139.44], zoom_start=5) + # df = pd.read_csv(inputFile) + colors = ['red', 'blue', 'green', 'purple', 'orange', 'darkred', 'lightred', 'beige', 'darkblue', 'darkgreen', + 'cadetblue', 'darkpurple', 'white', 'pink', 'lightblue', 'lightgreen', 'gray', 'black', 'lightgray'] + for i, row in df.iterrows(): + folium.CircleMarker( + location=(row['latitude'], row['longitude']), + popup=row['patternId'], + radius=3, + # icon=folium.Icon(color=colors[int(row['patternId'])-1]) + color=colors[int(row['patternId']) - 1], + fill=True, + fill_color=colors[int(row['patternId']) - 1], + ).add_to(mmap) + return mmap
+
+ + +if __name__ == '__main__': + obj = plotPointOnMap('/Users/nakamura0803/medicalDataAnalytics/test/disease/pattern_8842163_0.8.txt') + mmap = obj.plotPointInMap() + mmap.save('map.html') +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/scatterPlotSpatialPoints.html b/sphinx/_build/html/_modules/PAMI/extras/scatterPlotSpatialPoints.html new file mode 100644 index 000000000..caf06b0fa --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/scatterPlotSpatialPoints.html @@ -0,0 +1,221 @@ + + + + + + PAMI.extras.scatterPlotSpatialPoints — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.scatterPlotSpatialPoints

+# scatterPlotSpatialPoints is used to convert the given data and plot the points.
+#
+#   **Importing this algorithm into a python program**
+#   --------------------------------------------------------
+#
+#   from PAMI.extras import scatterPlotSpatialPoints as plt
+#
+#   obj = plt.scatterPlotSpatialPoints(iFile, "\t")
+#
+#   obj.scatterPlotSpatialPoints()
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+import matplotlib.pyplot as _plt
+import pandas as _pd
+from urllib.request import urlopen as _urlopen
+from typing import Dict, List
+
+
+
+
+[docs]
+class scatterPlotSpatialPoints:
+    """
+
+    :Description: scatterPlotSpatialPoints is used to convert the given data and plot the points.
+
+    :param iFile: str : Name of the Input file
+    :param sep: str : This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator.
+
+    **Importing this algorithm into a python program**
+    --------------------------------------------------------
+    .. code-block:: python
+
+            from PAMI.extras import scatterPlotSpatialPoints as plt
+
+            obj = plt.scatterPlotSpatialPoints(iFile, "\t")
+
+            obj.scatterPlotSpatialPoints()
+
+    """
+
+    def __init__(self, iFile: str, sep: str = '\t') ->None:
+
+        self._iFile = iFile
+        self._sep = sep
+
+    def _scanningPoints(self) -> Dict[str, str]:
+
+        points = {}
+        if isinstance(self._iFile, _pd.DataFrame):
+            x, y = [], []
+            if self._iFile.empty:
+                print("its empty..")
+            i = self._iFile.columns.values.tolist()
+            if 'x' in i:
+                x = self._iFile['x'].tolist()
+            if 'y' in i:
+                y = self._iFile['y'].tolist()
+            for i in range(len(y)):
+                points[x[i]] = y[i]
+
+        if isinstance(self._iFile, str):
+            if self._iFile.startswith(('http:', 'https:')):
+                data = _urlopen(self._iFile)
+                for line in data:
+                    line = line.strip()
+                    line = line.decode("utf-8")
+                    temp = [i.rstrip() for i in line.split(self._sep)]
+                    temp = [x for x in temp if x]
+                    # map the x value to its y value (the original read from the
+                    # still-empty dictionary here, raising a KeyError)
+                    points[temp[0]] = temp[1]
+            else:
+                try:
+                    with open(self._iFile, 'r', encoding='utf-8') as f:
+                        for line in f:
+                            line = line.strip()
+                            temp = [i.rstrip() for i in line.split(self._sep)]
+                            temp = [x for x in temp if x]
+                            points[temp[0]] = temp[1]
+                except IOError:
+                    print("File Not Found")
+                    quit()
+        return points
+
+[docs] + def scatterPlotSpatialPoints(self) -> None: + points = self._scanningPoints() + keys = [i for i in points.keys()] + values = [i for i in points.values()] + _plt.scatter(keys, values, c="Red") + _plt.xlabel("X-axis") + _plt.ylabel("Y-axis") + _plt.show() + print("Scatter Plot is generated")
+
+ + + +if __name__ == '__main__': + ab = scatterPlotSpatialPoints(iFile = '/Users/Likhitha/Downloads/spatial_T10I4D100K.csv', sep = ',') + ab.scatterPlotSpatialPoints() +
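+
+# A minimal usage sketch (hypothetical file name; each line is expected to
+# hold an x value and a y value separated by the given separator):
+#
+#     from PAMI.extras import scatterPlotSpatialPoints as sp
+#
+#     obj = sp.scatterPlotSpatialPoints("spatialPoints.csv", ",")
+#     obj.scatterPlotSpatialPoints()
+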
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/stats/TransactionalDatabase.html b/sphinx/_build/html/_modules/PAMI/extras/stats/TransactionalDatabase.html new file mode 100644 index 000000000..7318a23f8 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/stats/TransactionalDatabase.html @@ -0,0 +1,509 @@ + + + + + + PAMI.extras.stats.TransactionalDatabase — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.stats.TransactionalDatabase

+# TransactionalDatabase is a class used to get stats of a database.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#             from PAMI.extras.stats import TransactionalDatabase as db
+#
+#             obj = db.TransactionalDatabase(iFile, "\t")
+#
+#             obj.run()
+#
+#             obj.printStats()
+#
+#             obj.save(obj.getSortedListOfItemFrequencies(), oFile)
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+import sys
+import statistics
+import pandas as pd
+import validators
+import numpy as np
+from urllib.request import urlopen
+from typing import List, Dict, Tuple, Set, Union, Any, Generator
+import PAMI.extras.graph.plotLineGraphFromDictionary as plt
+
+
+
+
+[docs]
+class TransactionalDatabase:
+    """
+    :Description: TransactionalDatabase is a class to get stats of a database.
+
+    :Attributes:
+
+        :param inputFile: file :
+            input file path
+        :param sep: str
+            separator in file. Default is tab space.
+
+    :Methods:
+
+        run()
+            read the database and compute the size of each transaction
+        getDatabaseSize()
+            get the size of database
+        getMinimumTransactionLength()
+            get the minimum transaction length
+        getAverageTransactionLength()
+            get the average transaction length. It is sum of all transaction lengths divided by database length.
+        getMaximumTransactionLength()
+            get the maximum transaction length
+        getStandardDeviationTransactionLength()
+            get the standard deviation of transaction length
+        getSortedListOfItemFrequencies()
+            get sorted list of item frequencies
+        getTransanctionalLengthDistribution()
+            get sorted distribution of transaction lengths
+        save(data, outputFile)
+            store data into outputFile
+
+    **Importing this algorithm into a python program**
+    --------------------------------------------------------
+    .. code-block:: python
+
+            from PAMI.extras.stats import TransactionalDatabase as db
+
+            obj = db.TransactionalDatabase(iFile, "\t")
+
+            obj.run()
+
+            obj.printStats()
+
+    """
+
+    def __init__(self, inputFile: Union[str, pd.DataFrame], sep: str = '\t') -> None:
+        """
+        :param inputFile: input file name or path
+        :type inputFile: str
+        :param sep: separator
+        :type sep: str
+        :return: None
+        """
+        self.inputFile = inputFile
+        self.lengthList = []
+        self.sep = sep
+        self.database = {}
+        self.itemFrequencies = {}
+
+
+[docs]
+    def run(self) -> None:
+        """
+        read database from input file and store into database and size of each transaction.
+        """
+        numberOfTransaction = 0
+        if isinstance(self.inputFile, pd.DataFrame):
+            if self.inputFile.empty:
+                print("its empty..")
+            i = self.inputFile.columns.values.tolist()
+            if 'tid' in i and 'Transactions' in i:
+                self.database = self.inputFile.set_index('tid').T.to_dict(orient='records')[0]
+            if 'tid' in i and 'Patterns' in i:
+                self.database = self.inputFile.set_index('tid').T.to_dict(orient='records')[0]
+        if isinstance(self.inputFile, str):
+            if validators.url(self.inputFile):
+                data = urlopen(self.inputFile)
+                for line in data:
+                    numberOfTransaction += 1
+                    # urlopen yields bytes; decode, then assign the stripped result
+                    line = line.decode("utf-8").strip()
+                    temp = [i.rstrip() for i in line.split(self.sep)]
+                    temp = [x for x in temp if x]
+                    self.database[numberOfTransaction] = temp
+            else:
+                try:
+                    with open(self.inputFile, 'r', encoding='utf-8') as f:
+                        for line in f:
+                            numberOfTransaction += 1
+                            # str.strip() returns a new string; the result must be assigned
+                            line = line.strip()
+                            temp = [i.rstrip() for i in line.split(self.sep)]
+                            temp = [x for x in temp if x]
+                            self.database[numberOfTransaction] = temp
+                except IOError:
+                    print("File Not Found")
+                    quit()
+        self.lengthList = [len(s) for s in self.database.values()]
+
+ + +
+[docs] + def getDatabaseSize(self) -> int: + """ + get the size of database + :return: dataset size + :rtype: int + """ + return len(self.database)
+ + +
+[docs] + def getTotalNumberOfItems(self) -> int: + """ + get the number of items in database. + :return: number of items + :rtype: int + """ + return len(self.getSortedListOfItemFrequencies())
+ + +
+[docs] + def getMinimumTransactionLength(self) -> int: + """ + get the minimum transaction length + :return: minimum transaction length + :rtype: int + """ + return min(self.lengthList)
+ + +
+[docs] + def getAverageTransactionLength(self) -> float: + """ + get the average transaction length. It is sum of all transaction length divided by database length. + :return: average transaction length + :rtype: float + """ + totalLength = sum(self.lengthList) + return totalLength / len(self.database)
+ + +
+[docs] + def getMaximumTransactionLength(self) -> int: + """ + get the maximum transaction length + :return: maximum transaction length + :rtype: int + """ + return max(self.lengthList)
+ + +
+[docs] + def getStandardDeviationTransactionLength(self) -> float: + """ + get the standard deviation transaction length + :return: standard deviation transaction length + :rtype: float + """ + return statistics.pstdev(self.lengthList)
+ + +
+[docs] + def getVarianceTransactionLength(self) -> float: + """ + get the variance transaction length + :return: variance transaction length + :rtype: float + """ + return statistics.variance(self.lengthList)
+ + +
+[docs] + def getNumberOfItems(self) -> int: + """ + get the number of items in database. + :return: number of items + :rtype: int + """ + return len(self.getSortedListOfItemFrequencies())
+ + +
+[docs] + def convertDataIntoMatrix(self) -> np.ndarray: + singleItems = self.getSortedListOfItemFrequencies() + # big_array = np.zeros((self.getDatabaseSize(), len(self.getSortedListOfItemFrequencies()))) + itemsets = {} + for i in self.database: + for item in singleItems: + if item in itemsets: + if item in self.database[i]: + itemsets[item].append(1) + else: + itemsets[item].append(0) + else: + if item in self.database[i]: + itemsets[item] = [1] + else: + itemsets[item] = [0] + # new = pd.DataFrame.from_dict(itemsets) + data = list(itemsets.values()) + an_array = np.array(data) + return an_array
+ + +
+[docs] + def getSparsity(self) -> float: + """ + get the sparsity of database. sparsity is percentage of 0 of database. + :return: database sparsity + :rtype: float + """ + big_array = self.convertDataIntoMatrix() + n_zeros = np.count_nonzero(big_array == 0) + return (n_zeros / big_array.size)
+ + +
+
+[docs]
+    def getDensity(self) -> float:
+        """
+        get the density of database. density is the percentage of non-zero entries in the database matrix.
+        :return: database density
+        :rtype: float
+        """
+        big_array = self.convertDataIntoMatrix()
+        n_zeros = np.count_nonzero(big_array != 0)
+        return (n_zeros / big_array.size)
+
+ + +
+[docs] + def getSortedListOfItemFrequencies(self) -> dict: + """ + get sorted list of item frequencies + :return: item frequencies + :rtype: dict + """ + itemFrequencies = {} + for tid in self.database: + for item in self.database[tid]: + itemFrequencies[item] = itemFrequencies.get(item, 0) + itemFrequencies[item] += 1 + self.itemFrequencies = {k: v for k, v in sorted(itemFrequencies.items(), key=lambda x: x[1], reverse=True)} + return self.itemFrequencies
+ + +
+
+[docs]
+    def getFrequenciesInRange(self) -> dict:
+        """
+        get the number of items whose frequency falls into each of six equal-width frequency ranges
+        :return: count of items per frequency range
+        :rtype: dict
+        """
+        fre = self.getSortedListOfItemFrequencies()
+        rangeFrequencies = {}
+        maximum = max([i for i in fre.values()])
+        values = [int(i*maximum/6) for i in range(1,6)]
+        va = len({key: val for key, val in fre.items() if val > 0 and val < values[0]})
+        # map each range's upper bound to the number of items inside it,
+        # matching the sequentialDatabase implementation
+        rangeFrequencies[values[0]] = va
+        for i in range(1,len(values)):
+            va = len({key: val for key, val in fre.items() if val < values[i] and val > values[i-1]})
+            rangeFrequencies[values[i]] = va
+        return rangeFrequencies
+
+ + +
+[docs] + def getTransanctionalLengthDistribution(self) -> dict: + """ + Get transaction length + :return: a dictionary with transaction + :rtype: dict + """ + transactionLength = {} + for length in self.lengthList: + transactionLength[length] = transactionLength.get(length, 0) + transactionLength[length] += 1 + return {k: v for k, v in sorted(transactionLength.items(), key=lambda x: x[0])}
+ + +
+[docs] + def save(self, data: dict, outputFile: str) -> None: + """ + store data into outputFile + :param data: input data + :type data: dict + :param outputFile: output file name or path to store + :type outputFile: str + :return: None + """ + with open(outputFile, 'w') as f: + for key, value in data.items(): + f.write(f'{key}\t{value}\n')
+ + +
+[docs] + def printStats(self) -> None: + print(f'Database size (total no of transactions) : {self.getDatabaseSize()}') + print(f'Number of items : {self.getNumberOfItems()}') + print(f'Minimum Transaction Size : {self.getMinimumTransactionLength()}') + print(f'Average Transaction Size : {self.getAverageTransactionLength()}') + print(f'Maximum Transaction Size : {self.getMaximumTransactionLength()}') + print(f'Standard Deviation Transaction Size : {self.getStandardDeviationTransactionLength()}') + print(f'Variance in Transaction Sizes : {self.getVarianceTransactionLength()}') + print(f'Sparsity : {self.getSparsity()}')
+ + +
+[docs] + def plotGraphs(self) -> None: + # itemFrequencies = self.getFrequenciesInRange() + transactionLength = self.getTransanctionalLengthDistribution() + plt.plotLineGraphFromDictionary(self.itemFrequencies, 100, 0, 'Frequency', 'No of items', 'frequency') + plt.plotLineGraphFromDictionary(transactionLength, 100, 0, 'transaction length', 'transaction length', 'frequency')
+
+
+
+if __name__ == '__main__':
+    data = {'tid': [1, 2, 3, 4, 5, 6, 7],
+
+            'Transactions': [['a', 'd', 'e'], ['b', 'a', 'f', 'g', 'h'], ['b', 'a', 'd', 'f'], ['b', 'a', 'c'],
+                             ['a', 'd', 'g', 'k'],
+
+                             ['b', 'd', 'g', 'c', 'i'], ['b', 'd', 'g', 'e', 'j']]}
+
+    # data = pd.DataFrame.from_dict('transactional_T10I4D100K.csv')
+    import PAMI.extras.graph.plotLineGraphFromDictionary as plt
+    import pandas as pd
+
+    # either read the database from a file passed on the command line ...
+    # obj = TransactionalDatabase(sys.argv[1], sys.argv[2])
+    # ... or build it from the in-memory data frame above
+    obj = TransactionalDatabase(pd.DataFrame(data))
+    obj.run()
+    obj.printStats()
+    obj.plotGraphs()
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/stats/graphDatabase.html b/sphinx/_build/html/_modules/PAMI/extras/stats/graphDatabase.html new file mode 100644 index 000000000..22058da3a --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/stats/graphDatabase.html @@ -0,0 +1,219 @@ + + + + + + PAMI.extras.stats.graphDatabase — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.stats.graphDatabase

+import networkx as nx
+import matplotlib.pyplot as plt
+
+
+[docs] +class graphDatabase: + + def __init__(self, iFile): + self.graphs = [] + current_graph = {'vertices': [], 'edges': []} + + with open(iFile, 'r') as file: + for line in file: + if line.startswith('t #'): + if current_graph['vertices'] or current_graph['edges']: + self.graphs.append(current_graph) + current_graph = {'vertices': [], 'edges': []} + elif line.startswith('v'): + _, v_id, label = line.split() + current_graph['vertices'].append((int(v_id), int(label))) + elif line.startswith('e'): + _, v1, v2, label = line.split() + current_graph['edges'].append((int(v1), int(v2), int(label))) + + if current_graph['vertices'] or current_graph['edges']: + self.graphs.append(current_graph) + +
+[docs] + def printIndividualGraphStats(self): + for i, graph in enumerate(self.graphs): + print(f"Graph {i}:") + num_vertices = len(graph['vertices']) + num_edges = len(graph['edges']) + vertex_labels = set(label for _, label in graph['vertices']) + edge_labels = set(label for _, _, label in graph['edges']) + + print(f" Number of vertices: {num_vertices}") + print(f" Number of edges: {num_edges}") + print(f" Unique vertex labels: {vertex_labels}") + print(f" Unique edge labels: {edge_labels}")
+ + +
+[docs] + def printGraphDatabaseStatistics(self): + total_nodes = 0 + total_edges = 0 + vertex_labels = set() + edge_labels = set() + + self.nodes_per_graph = [len(graph['vertices']) for graph in self.graphs] + self.edges_per_graph = [len(graph['edges']) for graph in self.graphs] + + for graph in self.graphs: + total_nodes += len(graph['vertices']) + total_edges += len(graph['edges']) + + for vertex in graph['vertices']: + vertex_labels.add(vertex[1]) + + for edge in graph['edges']: + edge_labels.add(edge[2]) + + average_nodes = sum(self.nodes_per_graph) / len(self.graphs) if self.graphs else 0 + average_edges = sum(self.edges_per_graph) / len(self.graphs) if self.graphs else 0 + max_nodes = max(self.nodes_per_graph) if self.graphs else 0 + min_nodes = min(self.nodes_per_graph) if self.graphs else 0 + max_edges = max(self.edges_per_graph) if self.graphs else 0 + min_edges = min(self.edges_per_graph) if self.graphs else 0 + total_unique_vertex_labels = len(vertex_labels) + total_unique_edge_labels = len(edge_labels) + + print(f'average_nodes: {average_nodes}') + print(f'average_edges: {average_edges}') + print(f'max_nodes: {max_nodes}') + print(f'min_nodes: {min_nodes}') + print(f'max_edges: {max_edges}') + print(f'min_edges: {min_edges}') + print(f'total_unique_vertex_labels: {total_unique_vertex_labels}') + print(f'total_unique_edge_labels: {total_unique_edge_labels}')
+ + +
+
+[docs]
+    def plotNodeDistribution(self):
+        # nodes_per_graph is populated by printGraphDatabaseStatistics(); recompute if it has not run yet
+        if not hasattr(self, 'nodes_per_graph'):
+            self.nodes_per_graph = [len(graph['vertices']) for graph in self.graphs]
+        plt.figure(figsize=(6, 4))
+        plt.hist(self.nodes_per_graph, bins=max(20, len(set(self.nodes_per_graph))), edgecolor='black')
+        plt.title('Distribution of Nodes per Graph')
+        plt.xlabel('Number of Nodes')
+        plt.ylabel('Frequency')
+        plt.tight_layout()
+        plt.show()
+
+ + +
+
+[docs]
+    def plotEdgeDistribution(self):
+        # edges_per_graph is populated by printGraphDatabaseStatistics(); recompute if it has not run yet
+        if not hasattr(self, 'edges_per_graph'):
+            self.edges_per_graph = [len(graph['edges']) for graph in self.graphs]
+        plt.figure(figsize=(6, 4))
+        plt.hist(self.edges_per_graph, bins=max(20, len(set(self.edges_per_graph))), edgecolor='black')
+        plt.title('Distribution of Edges per Graph')
+        plt.xlabel('Number of Edges')
+        plt.ylabel('Frequency')
+        plt.tight_layout()
+        plt.show()
+
+
+ + + +if __name__ == '__main__': + file_path = 'Chemical_340.txt' + obj = graphDatabase(file_path) + obj.printGraphDatabaseStatistics() + obj.printIndividualGraphStats() + obj.plotNodeDistribution() + obj.plotEdgeDistribution() +
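+
+# A minimal sketch of the expected input format (a hypothetical two-graph
+# file, matching the 't #' / 'v' / 'e' parser above; 'v id label' declares a
+# vertex, 'e v1 v2 label' an edge):
+#
+#     t # 0
+#     v 0 3
+#     v 1 5
+#     e 0 1 2
+#     t # 1
+#     v 0 3
+#     v 1 4
+#     e 0 1 1
+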
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/stats/sequentialDatabase.html b/sphinx/_build/html/_modules/PAMI/extras/stats/sequentialDatabase.html new file mode 100644 index 000000000..0d32d8848 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/stats/sequentialDatabase.html @@ -0,0 +1,600 @@ + + + + + + PAMI.extras.stats.sequentialDatabase — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.stats.sequentialDatabase

+# sequentialDatabase is used to get stats of a database, like the average, minimum, maximum and so on
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#             from PAMI.extras.stats import sequentialDatabase as db
+#
+#             obj = db.sequentialDatabase(iFile, "\t")
+#
+#             obj.readDatabase()
+#
+#             obj.printStats()
+#
+#             obj.plotGraphs()
+#
+
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+import statistics
+import validators
+from urllib.request import urlopen
+import PAMI.extras.graph.plotLineGraphFromDictionary as plt
+import sys
+from typing import List, Dict, Tuple, Set, Union, Any, Generator
+
+
+
+
+[docs]
+class sequentialDatabase():
+    """
+    sequentialDatabase is used to get stats of a database, like the average, minimum, maximum and so on.
+
+    :Attributes:
+
+        :param inputFile: file :
+            input file path
+        :param sep: str
+            separator in file. Default is tab space.
+
+    :Methods:
+
+        readDatabase():
+            read sequential database from input file and store into database and size of each sequence and subsequences.
+        getDatabaseSize(self):
+            get the size of database
+        getTotalNumberOfItems(self):
+            get the number of items in database.
+        getMinimumSequenceLength(self):
+            get the minimum sequence length
+        getAverageSubsequencePerSequenceLength(self):
+            get the average subsequence length per sequence length. It is sum of all subsequence lengths divided by sequence length.
+        getAverageItemPerSubsequenceLength(self):
+            get the average item length per subsequence. It is sum of all item lengths divided by subsequence length.
+        getMaximumSequenceLength(self):
+            get the maximum sequence length
+        getStandardDeviationSequenceLength(self):
+            get the standard deviation sequence length
+        getVarianceSequenceLength(self):
+            get the variance sequence length
+        getSequenceSize(self):
+            get the size of sequence
+        getMinimumSubsequenceLength(self):
+            get the minimum subsequence length
+        getAverageItemPerSequenceLength(self):
+            get the average item length per sequence. It is sum of all item lengths divided by sequence length.
+        getMaximumSubsequenceLength(self):
+            get the maximum subsequence length
+        getStandardDeviationSubsequenceLength(self):
+            get the standard deviation subsequence length
+        getVarianceSubsequenceLength(self):
+            get the variance subsequence length
+        getSortedListOfItemFrequencies(self):
+            get sorted list of item frequencies
+        getFrequenciesInRange(self):
+            get sorted list of item frequencies in some range
+        getSequencialLengthDistribution(self):
+            get sequence length distribution
+        getSubsequencialLengthDistribution(self):
+            get subsequence length distribution
+        printStats(self):
+            to print all the stats of the sequence database
+        plotGraphs(self):
+            to plot the distribution of items, subsequences per sequence and items per subsequence
+
+    **Importing this algorithm into a python program**
+    --------------------------------------------------------
+    .. code-block:: python
+
+            from PAMI.extras.stats import sequentialDatabase as db
+
+            obj = db.sequentialDatabase(iFile, "\t")
+
+            obj.readDatabase()
+
+            obj.printStats()
+
+    **Executing the code on terminal:**
+    -------------------------------------------------
+
+    .. code-block:: console
+
+        Format:
+
+        (.venv) $ python3 sequentialDatabase.py <inputFile>
+
+        Example Usage:
+
+        (.venv) $ python3 sequentialDatabase.py sampleDB.txt
+
+    **Sample run of the importing code:**
+    ----------------------------------------------------
+    .. code-block:: python
+
+            import PAMI.extras.stats.sequentialDatabase as alg
+
+            _ap = alg.sequentialDatabase(inputfile, sep)
+            _ap.readDatabase()
+            _ap.printStats()
+            _ap.plotGraphs()
+
+    **Credits:**
+    ---------------------
+    The complete program was written by Shota Suzuki under the supervision of Professor Rage Uday Kiran.
+    """
+
+    def __init__(self, inputFile: str, sep: str='\t') -> None:
+        """
+        :param inputFile: input file name or path
+        :type inputFile: str
+        :param sep: separator character for input file
+        :type sep: str
+        :return: None
+        """
+        self.inputFile = inputFile
+        self.seqLengthList = []
+        self.subSeqLengthList = []
+        self.sep = sep
+        self.database = {}
+
+
+[docs]
+    def readDatabase(self) -> None:
+        """
+        read sequential database from input file and store into database and size of each sequence and subsequences.
+        """
+        if isinstance(self.inputFile, str):
+            if validators.url(self.inputFile):
+                data = urlopen(self.inputFile)
+                rowNum = 0
+                for line in data:
+                    # urlopen yields bytes; decode before splitting on the '-1' delimiters
+                    line = line.decode("utf-8").strip()
+                    temp = [i.rstrip() for i in line.split('-1')]
+                    temp = [x for x in temp if x]
+                    temp.pop()
+                    seq = []
+                    self.seqLengthList.append(len(temp))
+                    self.subSeqLengthList.append([len(i) for i in temp])
+                    for i in temp:
+                        if len(i) > 1:
+                            tempSorted = list(sorted(set(i.split())))
+                            seq.append(tempSorted)
+                        else:
+                            seq.append(i)
+                    rowNum += 1
+                    if seq:
+                        self.database[rowNum] = seq
+            else:
+                with open(self.inputFile, 'r') as f:
+                    rowNum = 0
+                    for line in f:
+                        temp = [i.rstrip(self.sep) for i in line.split('-1')]
+                        temp = [x for x in temp if x]
+                        temp.pop()
+                        seq = []
+                        self.seqLengthList.append(len(temp))
+                        subseq = []
+                        for i in temp:
+                            if len(i) > 1:
+                                tempSorted = list(sorted(set(i.split())))
+                                subseq.append(len(tempSorted))
+                                seq.append(tempSorted)
+                            else:
+                                seq.append(i)
+                                subseq.append(len(i))
+                        if subseq != []:
+                            self.subSeqLengthList.append(subseq)
+                        rowNum += 1
+                        if seq:
+                            self.database[rowNum] = seq
+
+ + + +
+[docs] + def getDatabaseSize(self) -> int: + """ + get the size of database + :return: dataset size + :rtype: int + """ + return len(self.database)
+ + +
+[docs] + def getTotalNumberOfItems(self) -> int: + """ + get the number of items in database. + :return: number of items + :rtype: int + """ + return len(self.getSortedListOfItemFrequencies())
+ + +
+[docs] + def getMinimumSequenceLength(self) -> int: + """ + get the minimum sequence length + :return: minimum sequence length + :rtype: int + """ + return min(self.seqLengthList)
+ + +
+[docs] + def getAverageSubsequencePerSequenceLength(self) -> float: + """ + get the average subsequence length per sequence length. It is sum of all subsequence length divided by sequence length. + :return: average subsequence length per sequence length + :rtype: float + """ + totalLength = sum(self.seqLengthList) + return totalLength / len(self.database)
+ + +
+[docs] + def getAverageItemPerSubsequenceLength(self) -> float: + + """ + get the average Item length per subsequence. It is sum of all item length divided by subsequence length. + :return: average Item length per subsequence + :rtype: float + """ + + totalLength = sum(list(map(sum,self.subSeqLengthList))) + return totalLength / sum(self.seqLengthList)
+ + +
+[docs] + def getMaximumSequenceLength(self) -> int: + """ + get the maximum sequence length + :return: maximum sequence length + :rtype: int + """ + return max(self.seqLengthList)
+ + +
+[docs] + def getStandardDeviationSequenceLength(self) -> float: + """ + get the standard deviation sequence length + :return: standard deviation sequence length + :rtype: float + """ + return statistics.pstdev(self.seqLengthList)
+ + +
+[docs] + def getVarianceSequenceLength(self) -> float: + """ + get the variance Sequence length + :return: variance Sequence length + :rtype: float + """ + return statistics.variance(self.seqLengthList)
+ + +
+[docs] + def getSequenceSize(self) -> int: + """ + get the size of sequence + :return: sequences size + :rtype: int + """ + return sum(self.seqLengthList)
+ + +
+[docs] + def getMinimumSubsequenceLength(self) -> int: + """ + get the minimum subsequence length + :return: minimum subsequence length + :rtype: int + """ + return min(list(map(min,self.subSeqLengthList)))
+ + +
+[docs] + def getAverageItemPerSequenceLength(self) -> float: + """ + get the average item length per sequence. It is sum of all item length divided by sequence length. + :return: average item length per sequence + :rtype: float + """ + totalLength = sum(list(map(sum,self.subSeqLengthList))) + return totalLength / len(self.database)
+ + +
+[docs] + def getMaximumSubsequenceLength(self) -> int: + """ + get the maximum subsequence length + :return: maximum subsequence length + :rtype: int + """ + return max(list(map(max,self.subSeqLengthList)))
+ + +
+[docs] + def getStandardDeviationSubsequenceLength(self) -> float: + """ + get the standard deviation subsequence length + :return: standard deviation subsequence length + :rtype: float + """ + allList=[] + for i in self.subSeqLengthList: + allList=allList+i + return statistics.pstdev(allList)
+ + +
+[docs] + def getVarianceSubsequenceLength(self) -> float: + """ + get the variance subSequence length + :return: variance subSequence length + :rtype: float + """ + allList = [] + for i in self.subSeqLengthList: + allList = allList + i + return statistics.variance(allList)
+ + +
+[docs] + def getSortedListOfItemFrequencies(self) -> Dict[str, int]: + """ + get sorted list of item frequencies + :return: item frequencies + :rtype: dict + """ + itemFrequencies = {} + for seq in self.database: + for sub in self.database[seq]: + for item in sub: + itemFrequencies[item] = itemFrequencies.get(item, 0) + itemFrequencies[item] += 1 + return {k: v for k, v in sorted(itemFrequencies.items(), key=lambda x: x[1], reverse=True)}
+ + +
+[docs] + def getFrequenciesInRange(self) -> Dict[int, int]: + """ + get the number of items falling into each frequency range + :return: mapping from the upper bound of each range to the number of items in that range + :rtype: dict + """ + fre = self.getSortedListOfItemFrequencies() + rangeFrequencies = {} + maximum = max([i for i in fre.values()]) + values = [int(i * maximum / 6) for i in range(1, 6)] + va = len({key: val for key, val in fre.items() if val > 0 and val < values[0]}) + rangeFrequencies[values[0]] = va + for i in range(1, len(values)): + va = len({key: val for key, val in fre.items() if val < values[i] and val > values[i - 1]}) + rangeFrequencies[values[i]] = va + return rangeFrequencies
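+ # Worked example (added for clarity, not from the original source): with item
+ # frequencies {a: 12, b: 7, c: 3, d: 1}, maximum = 12 and the range bounds
+ # become [2, 4, 6, 8, 10]; d (1) is counted under bound 2, c (3) under bound 4
+ # and b (7) under bound 8. Note that, because the comparisons are strict,
+ # frequencies equal to a bound or above the last bound (such as a's 12) are
+ # not counted by this implementation.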
+ + +
+[docs] + def getSequencialLengthDistribution(self) -> Dict[int, int]: + """ + get Sequence length Distribution + :return: Sequence length + :rtype: dict + """ + transactionLength = {} + for length in self.seqLengthList: + transactionLength[length] = transactionLength.get(length, 0) + transactionLength[length] += 1 + return {k: v for k, v in sorted(transactionLength.items(), key=lambda x: x[0])}
+ + +
+[docs] + def getSubsequencialLengthDistribution(self) -> Dict[int, int]: + """ + get subSequence length distribution + :return: subSequence length + :rtype: dict + """ + transactionLength = {} + for sublen in self.subSeqLengthList: + for length in sublen: + transactionLength[length] = transactionLength.get(length, 0) + transactionLength[length] += 1 + return {k: v for k, v in sorted(transactionLength.items(), key=lambda x: x[0])}
+ + +
+[docs] + def run(self) -> None: + self.readDatabase()
+ + +
+[docs] + def printStats(self) -> None: + """ + print all statistics of the sequential database + """ + print(f'Database size (total no of sequences) : {self.getDatabaseSize()}') + print(f'Number of items : {self.getTotalNumberOfItems()}') + print(f'Minimum Sequence Size : {self.getMinimumSequenceLength()}') + print(f'Average Sequence Size : {self.getAverageSubsequencePerSequenceLength()}') + print(f'Maximum Sequence Size : {self.getMaximumSequenceLength()}') + print(f'Standard Deviation Sequence Size : {self.getStandardDeviationSequenceLength()}') + print(f'Variance in Sequence Sizes : {self.getVarianceSequenceLength()}') + print(f'Sequence size (total no of subsequences) : {self.getSequenceSize()}') + print(f'Minimum subSequence Size : {self.getMinimumSubsequenceLength()}') + print(f'Average subSequence Size : {self.getAverageItemPerSubsequenceLength()}') + print(f'Maximum subSequence Size : {self.getMaximumSubsequenceLength()}') + print(f'Standard Deviation subSequence Size : {self.getStandardDeviationSubsequenceLength()}') + print(f'Variance in subSequence Sizes : {self.getVarianceSubsequenceLength()}')
+ + +
+[docs] + def plotGraphs(self) -> None: + """ + To plot the distribution about items, subsequences in sequence and items in subsequence + """ + itemFrequencies = self.getFrequenciesInRange() + seqLen = self.getSequencialLengthDistribution() + subLen=self.getSubsequencialLengthDistribution() + plt.plotLineGraphFromDictionary(itemFrequencies, 100, 'Frequency', 'No of items', 'frequency') + plt.plotLineGraphFromDictionary(seqLen, 100, 'sequence length', 'sequence length', 'frequency') + plt.plotLineGraphFromDictionary(subLen, 100, 'subsequence length', 'subsequence length', 'frequency')
+
+ + +if __name__ == '__main__': + if len(sys.argv) == 3 or len(sys.argv) == 2: + if len(sys.argv) == 3: + _ap = sequentialDatabase(sys.argv[1], sys.argv[2]) + if len(sys.argv) == 2: + _ap = sequentialDatabase(sys.argv[1]) + _ap.run() + _ap.printStats() + _ap.plotGraphs() + else: + print("Error! The number of input parameters does not match the total number of parameters provided") +
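+
+# --- Illustrative usage sketch (added; not part of the original module) ---
+# A minimal, self-contained demo assuming the '-1'/'-2' separated sequence
+# format that readDatabase() parses: items inside a subsequence are separated
+# by spaces, every subsequence ends with -1, and every sequence ends with -2.
+# The file name 'sampleSeqDB.txt' is only an example.
+def _demoSequentialStats() -> None:
+    with open('sampleSeqDB.txt', 'w') as f:
+        f.write('a b -1 c -1 -2\n')
+        f.write('a -1 b c d -1 e -1 -2\n')
+    obj = sequentialDatabase('sampleSeqDB.txt')
+    obj.run()
+    obj.printStats()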
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/stats/temporalDatabase.html b/sphinx/_build/html/_modules/PAMI/extras/stats/temporalDatabase.html new file mode 100644 index 000000000..f5335c72c --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/stats/temporalDatabase.html @@ -0,0 +1,642 @@ + + + + + + PAMI.extras.stats.temporalDatabase — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +

Source code for PAMI.extras.stats.temporalDatabase

+# TemporalDatabase is a class used to get stats of database.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#             from PAMI.extras.stats import temporalDatabase as db
+#
+#             obj = db.temporalDatabase(iFile, "\t")
+#
+#             obj.save(obj.getSortedListOfItemFrequencies(), oFile)
+#
+#             obj.run()
+#
+#             obj.printStats()
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+import sys
+import statistics
+import pandas as pd
+import validators
+import numpy as np
+from urllib.request import urlopen
+from typing import Dict, Union
+import PAMI.extras.graph.plotLineGraphFromDictionary as plt
+
+
+
+[docs] +class temporalDatabase: + """ + :Description: TemporalDatabase is a class to get stats of a temporal database. + + :Attributes: + + :param inputFile : file + input file path + + :param sep : str + separator in file. Default is tab space. + + :Methods: + + run() + execute readDatabase function + readDatabase() + read database from input file + getDatabaseSize() + get the size of database + getMinimumTransactionLength() + get the minimum transaction length + getAverageTransactionLength() + get the average transaction length. It is the sum of all transaction lengths divided by database size. + getMaximumTransactionLength() + get the maximum transaction length + getStandardDeviationTransactionLength() + get the standard deviation of the transaction lengths + getSortedListOfItemFrequencies() + get a sorted dictionary of item frequencies + getTransanctionalLengthDistribution() + get the transaction length distribution + save(data, outputFile) + store data into outputFile + getMinimumInterArrivalPeriod() + get the minimum inter-arrival period + getAverageInterArrivalPeriod() + get the average inter-arrival period + getMaximumInterArrivalPeriod() + get the maximum inter-arrival period + getMinimumPeriodOfItem() + get the minimum periodicity of an item + getAveragePeriodOfItem() + get the average periodicity of an item + getMaximumPeriodOfItem() + get the maximum periodicity of an item + getStandardDeviationPeriod() + get the standard deviation of the inter-arrival periods + getNumberOfTransactionsPerTimestamp() + get the number of transactions per timestamp. The timestamp range is 1 to the maximum timestamp. + + **Importing this algorithm into a python program** + -------------------------------------------------------- + .. code-block:: python + + from PAMI.extras.stats import temporalDatabase as db + + obj = db.temporalDatabase(iFile, "\t") + + obj.run() + + obj.printStats() + """ + + def __init__(self, inputFile: Union[str, pd.DataFrame], sep: str = '\t') -> None: + """ + :param inputFile: input file name or path + :type inputFile: str + :param sep: separator in input file. Default is tab space. + :type sep: str + :return: None + """ + self.inputFile = inputFile + self.database = {} + self.lengthList = [] + self.timeStampCount = {} + self.periodList = [] + self.sep = sep + self.periods = {} + +
+[docs] + def run(self) -> None: + self.readDatabase()
+ + +
+[docs] + def readDatabase(self) -> None: + """ + read database from input file and store the transactions, keyed by timestamp, together with the size of each transaction. + The inter-arrival periods between consecutive timestamps are stored as a list. + """ + numberOfTransaction = 0 + if isinstance(self.inputFile, pd.DataFrame): + if self.inputFile.empty: + print("its empty..") + i = self.inputFile.columns.values.tolist() + if 'TS' in i and 'Transactions' in i: + self.database = self.inputFile.set_index('TS').T.to_dict(orient='records')[0] + if 'TS' in i and 'Patterns' in i: + self.database = self.inputFile.set_index('TS').T.to_dict(orient='records')[0] + self.timeStampCount = self.inputFile.groupby('TS').count().T.to_dict(orient='records')[0] + + if isinstance(self.inputFile, str): + if validators.url(self.inputFile): + data = urlopen(self.inputFile) + for line in data: + numberOfTransaction += 1 + line = line.decode("utf-8").strip() + temp = [i.rstrip() for i in line.split(self.sep)] + temp = [x for x in temp if x] + self.database[int(temp[0])] = temp[1:] + self.timeStampCount[int(temp[0])] = self.timeStampCount.get(int(temp[0]), 0) + self.timeStampCount[int(temp[0])] += 1 + else: + try: + with open(self.inputFile, 'r', encoding='utf-8') as f: + for line in f: + numberOfTransaction += 1 + line = line.strip() + temp = [i.rstrip() for i in line.split(self.sep)] + temp = [x for x in temp if x] + if len(temp) > 0: + self.database[int(temp[0])] = temp[1:] + self.timeStampCount[int(temp[0])] = self.timeStampCount.get(int(temp[0]), 0) + self.timeStampCount[int(temp[0])] += 1 + except IOError: + print("File Not Found") + quit() + self.lengthList = [len(s) for s in self.database.values()] + timeStampList = sorted(list(self.database.keys())) + preTimeStamp = 0 + for ts in timeStampList: + self.periodList.append(int(ts) - preTimeStamp) + preTimeStamp = ts + + for x, y in self.database.items(): + for i in y: + if i not in self.periods: + self.periods[i] = [x, x] + else: + self.periods[i][0] = max(self.periods[i][0], x - self.periods[i][1]) + self.periods[i][1] = x + for key in self.periods: + self.periods[key][0] = max(self.periods[key][0], abs(len(self.database) - self.periods[key][1])) + self.periods = {k: v[0] for k, v in self.periods.items()}
+ + +
+[docs] + def getDatabaseSize(self) -> int: + """ + get the size of database + :return: dataset size + :rtype: int + """ + return len(self.database)
+ + +
+[docs] + def getMinimumTransactionLength(self) -> int: + """ + get the minimum transaction length + :return: minimum transaction length + :rtype: int + """ + return min(self.lengthList)
+ + +
+[docs] + def getAverageTransactionLength(self) -> float: + """ + get the average transaction length. It is sum of all transaction length divided by database length. + :return: average transaction length + :rtype: float + """ + totalLength = sum(self.lengthList) + return totalLength / len(self.database)
+ + +
+[docs] + def getMaximumTransactionLength(self) -> int: + """ + get the maximum transaction length + :return: maximum transaction length + :rtype: int + """ + return max(self.lengthList)
+ + +
+[docs] + def getStandardDeviationTransactionLength(self) -> float: + """ + get the standard deviation transaction length + :return: standard deviation transaction length + :rtype: float + """ + return statistics.pstdev(self.lengthList)
+ + +
+[docs] + def getVarianceTransactionLength(self) -> float: + """ + get the variance transaction length + :return: variance transaction length + :rtype: float + """ + return statistics.variance(self.lengthList)
+ + +
+[docs] + def convertDataIntoMatrix(self) -> np.ndarray: + singleItems = self.getSortedListOfItemFrequencies() + itemsets = {} + for tid in self.database: + for item in singleItems: + if item in itemsets: + if item in self.database[tid]: + itemsets[item].append(1) + else: + itemsets[item].append(0) + else: + if item in self.database[tid]: + itemsets[item] = [1] + else: + itemsets[item] = [0] + data = list(itemsets.values()) + an_array = np.array(data) + return an_array
+ + +
+[docs] + def getSparsity(self) -> float: + """ + get the sparsity of database. sparsity is percentage of 0 of database. + :return: database sparsity + :rtype: float + """ + big_array = self.convertDataIntoMatrix() + n_zeros = np.count_nonzero(big_array == 0) + return (n_zeros / big_array.size)
+ + +
+[docs] + def getDensity(self) -> float: + """ + get the density of database. Density is the fraction of non-zero entries in the database matrix. + :return: database density + :rtype: float + """ + big_array = self.convertDataIntoMatrix() + n_ones = np.count_nonzero(big_array == 1) + return (n_ones / big_array.size)
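+ # Worked example (added for clarity): for the matrix [[1, 0, 1], [0, 0, 1]]
+ # there are 3 zeros among 6 cells, so getSparsity() returns 3/6 = 0.5 and
+ # getDensity() returns 3/6 = 0.5; the two values always sum to 1.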
+ + +
+[docs] + def getTotalNumberOfItems(self) -> int: + """ + get the number of items in database. + :return: number of items + :rtype: int + """ + return len(self.getSortedListOfItemFrequencies())
+ + +
+[docs] + def getSortedListOfItemFrequencies(self) -> Dict[str, int]: + """ + get sorted list of item frequencies + :return: item frequencies + :rtype: dict + """ + itemFrequencies = {} + for tid in self.database: + for item in self.database[tid]: + itemFrequencies[item] = itemFrequencies.get(item, 0) + itemFrequencies[item] += 1 + return {k: v for k, v in sorted(itemFrequencies.items(), key=lambda x: x[1], reverse=True)}
+ + +
+[docs] + def getFrequenciesInRange(self) -> Dict[int, int]: + """ + get the number of items falling into each frequency range + :return: mapping from the upper bound of each range to the number of items in that range + :rtype: dict + """ + fre = self.getSortedListOfItemFrequencies() + rangeFrequencies = {} + maximum = max([i for i in fre.values()]) + values = [int(i * maximum / 6) for i in range(1, 6)] + va = len({key: val for key, val in fre.items() if val > 0 and val < values[0]}) + rangeFrequencies[values[0]] = va + for i in range(1, len(values)): + va = len({key: val for key, val in fre.items() if val < values[i] and val > values[i - 1]}) + rangeFrequencies[values[i]] = va + return rangeFrequencies
+ + +
+[docs] + def getPeriodsInRange(self) -> Dict[int, int]: + """ + get the number of items falling into each periodicity range + :return: mapping from the upper bound of each range to the number of items in that range + :rtype: dict + """ + fre = {k: v for k, v in sorted(self.periods.items(), key=lambda x: x[1])} + rangePeriods = {} + maximum = max([i for i in fre.values()]) + values = [int(i * maximum / 6) for i in range(1, 6)] + va = len({key: val for key, val in fre.items() if val > 0 and val < values[0]}) + rangePeriods[values[0]] = va + for i in range(1, len(values)): + va = len({key: val for key, val in fre.items() if val < values[i] and val > values[i - 1]}) + rangePeriods[values[i]] = va + return rangePeriods
+ + +
+[docs] + def getTransanctionalLengthDistribution(self) -> Dict[int, int]: + """ + get transaction length + :return: transactional length + :rtype: dict + """ + transactionLength = {} + for length in self.lengthList: + transactionLength[length] = transactionLength.get(length, 0) + transactionLength[length] += 1 + return {k: v for k, v in sorted(transactionLength.items(), key=lambda x: x[0])}
+ + +
+[docs] + def save(self, data: dict, outputFile: str) -> None: + """ + store data into outputFile + :param data: input data + :type data: dict + :param outputFile: output file name or path to store + :type outputFile: str + :return: None + """ + with open(outputFile, 'w') as f: + for key, value in data.items(): + f.write(f'{key}\t{value}\n')
+ + +
+[docs] + def getMinimumInterArrivalPeriod(self) -> int: + """ + get the minimum inter arrival period + :return: minimum inter arrival period + :rtype: int + """ + return min(self.periodList)
+ + +
+[docs] + def getAverageInterArrivalPeriod(self) -> float: + """ + get the average inter arrival period. It is sum of all period divided by number of period. + :return: average inter arrival period + :rtype: float + """ + totalPeriod = sum(self.periodList) + return totalPeriod / len(self.periodList)
+ + +
+[docs] + def getMaximumInterArrivalPeriod(self) -> int: + """ + get the maximum inter arrival period + :return: maximum inter arrival period + :rtype: int + """ + return max(self.periodList)
+ + +
+[docs] + def getMinimumPeriodOfItem(self) -> int: + """ + get the minimum period of the item + :return: minimum period + :rtype: int + """ + return min([i for i in self.periods.values()])
+ + +
+[docs] + def getAveragePeriodOfItem(self) -> float: + """ + get the average period of the item + :return: average period + :rtype: float + """ + return sum([i for i in self.periods.values()]) / len(self.periods)
+ + +
+[docs] + def getMaximumPeriodOfItem(self) -> int: + """ + get the maximum period of the item + :return: maximum period + :rtype:int + """ + return max([i for i in self.periods.values()])
+ + +
+[docs] + def getStandardDeviationPeriod(self) -> float: + """ + get the standard deviation period + :return: standard deviation period + :rtype: float + """ + return statistics.pstdev(self.periodList)
+ + +
+[docs] + def getNumberOfTransactionsPerTimestamp(self) -> Dict[int, int]: + """ + get number of transactions per time stamp + :return: number of transactions per time stamp as dict + :rtype: dict + """ + maxTS = max(list(self.timeStampCount.keys())) + return {ts: self.timeStampCount.get(ts, 0) for ts in range(1, maxTS + 1)}
+ + +
+[docs] + def printStats(self) -> None: + """ + print the statistics of the temporal database + """ + print(f'Database size : {self.getDatabaseSize()}') + print(f'Number of items : {self.getTotalNumberOfItems()}') + print(f'Minimum Transaction Size : {self.getMinimumTransactionLength()}') + print(f'Average Transaction Size : {self.getAverageTransactionLength()}') + print(f'Maximum Transaction Size : {self.getMaximumTransactionLength()}') + print(f'Minimum Inter Arrival Period : {self.getMinimumInterArrivalPeriod()}') + print(f'Average Inter Arrival Period : {self.getAverageInterArrivalPeriod()}') + print(f'Maximum Inter Arrival Period : {self.getMaximumInterArrivalPeriod()}') + print(f'Minimum periodicity : {self.getMinimumPeriodOfItem()}') + print(f'Average periodicity : {self.getAveragePeriodOfItem()}') + print(f'Maximum periodicity : {self.getMaximumPeriodOfItem()}') + print(f'Standard Deviation Transaction Size : {self.getStandardDeviationTransactionLength()}') + print(f'Variance : {self.getVarianceTransactionLength()}') + print(f'Sparsity : {self.getSparsity()}')
+ + +
+[docs] + def plotGraphs(self) -> None: + """ + plot the item frequency and transaction length distributions + """ + itemFrequencies = self.getFrequenciesInRange() + transactionLength = self.getTransanctionalLengthDistribution() + plt.plotLineGraphFromDictionary(itemFrequencies, 100, 0, 'Frequency', 'no of items', 'frequency') + plt.plotLineGraphFromDictionary(transactionLength, 100, 0, 'transaction length', 'transaction length', + 'frequency')
+
+ + + +if __name__ == '__main__': + data = {'TS': [1, 2, 3, 4, 5, 6, 7], + + 'Transactions': [['a', 'd', 'e'], ['b', 'a', 'f', 'g', 'h'], ['b', 'a', 'd', 'f'], ['b', 'a', 'c'], + ['a', 'd', 'g', 'k'], + + ['b', 'd', 'g', 'c', 'i'], ['b', 'd', 'g', 'e', 'j']]} + + import PAMI.extras.graph.plotLineGraphFromDictionary as plt + + if len(sys.argv) == 3: + obj = temporalDatabase(sys.argv[1], sys.argv[2]) + else: + print("No input file and separator given; using the sample dataframe instead.") + obj = temporalDatabase(pd.DataFrame(data)) + obj.run() + if obj.getDatabaseSize() > 0: + obj.printStats() + obj.plotGraphs() + else: + print("No data found in the database.") +
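+
+# --- Illustrative usage sketch (added; not part of the original module) ---
+# A minimal demo assuming the tab-separated temporal format parsed by
+# readDatabase(): each line starts with a timestamp, followed by the items of
+# that transaction. 'sampleTempDB.txt' is only an example file name.
+def _demoTemporalStats() -> None:
+    with open('sampleTempDB.txt', 'w') as f:
+        f.write('1\ta\tb\n')
+        f.write('2\ta\tc\n')
+        f.write('4\tb\tc\td\n')
+    obj = temporalDatabase('sampleTempDB.txt', '\t')
+    obj.run()
+    obj.printStats()  # e.g. the inter-arrival periods here are 1, 1 and 2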
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/stats/utilityDatabase.html b/sphinx/_build/html/_modules/PAMI/extras/stats/utilityDatabase.html new file mode 100644 index 000000000..7d137da33 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/stats/utilityDatabase.html @@ -0,0 +1,535 @@ + + + + + + PAMI.extras.stats.utilityDatabase — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +

Source code for PAMI.extras.stats.utilityDatabase

+# UtilityDatabase is a class used to get the stats of a utility database.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#             from PAMI.extras.stats import utilityDatabase as db
+#
+#             obj = db.utilityDatabase(iFile, "\t")
+#
+#             obj.save(obj.getSortedListOfItemFrequencies(), oFile)
+#
+#             obj.run()
+#
+#             obj.printStats()
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+import sys
+import statistics
+from urllib.request import urlopen
+import pandas as pd
+from typing import Union
+import PAMI.extras.graph.plotLineGraphFromDictionary as plt
+
+
+[docs] +class utilityDatabase: + """ + :Description: UtilityDatabase is a class to get stats of a utility database. + + :Attributes: + + :param inputFile: file : + input file path + :param sep: str + separator in file. Default is tab space. + + **Importing this algorithm into a python program** + -------------------------------------------------------- + .. code-block:: python + + from PAMI.extras.stats import utilityDatabase as db + + obj = db.utilityDatabase(iFile, "\t") + + obj.run() + + obj.printStats() + + """ + + def __init__(self, inputFile: Union[str, pd.DataFrame], sep: str='\t') -> None: + """ + :param inputFile: input file name or path + :type inputFile: str + :param sep: separator in input file. Default is tab space. + :type sep: str + :return: None + """ + self.inputFile = inputFile + self.database = {} + self.lengthList = [] + self.utility = {} + self.sep = sep + +
+[docs] + def run(self) -> None: + self.readDatabase()
+ + +
+[docs] + def creatingItemSets(self) -> None: + """ + Storing the complete transactions of the database/input file in a database variable + """ + self.Database = [] + self.utilityValues = [] + if isinstance(self.inputFile, pd.DataFrame): + if self.inputFile.empty: + print("its empty..") + i = self.inputFile.columns.values.tolist() + if 'Transactions' in i: + self.Database = self.inputFile['Transactions'].tolist() + if 'Patterns' in i: + self.Database = self.inputFile['Patterns'].tolist() + if 'Utility' in i: + self.utilityValues = self.inputFile['Utility'].tolist() + + if isinstance(self.inputFile, str): + if self.inputFile.startswith("http://") or self.inputFile.startswith("https://"): + data = urlopen(self.inputFile) + for line in data: + line = line.decode("utf-8").strip() + temp = [i.rstrip() for i in line.split(":")] + transaction = [s for s in temp[0].split(self.sep)] + self.Database.append([x for x in transaction if x]) + utilities = [int(s) for s in temp[2].split(self.sep) if s] + self.utilityValues.append(utilities) + else: + try: + with open(self.inputFile, 'r', encoding='utf-8') as f: + for line in f: + line = line.strip() + temp = [i.rstrip() for i in line.split(":")] + transaction = [s for s in temp[0].split(self.sep)] + self.Database.append([x for x in transaction if x]) + utilities = [int(s) for s in temp[2].split(self.sep) if s] + self.utilityValues.append(utilities) + except IOError: + print("File Not Found") + quit()
+ + +
+[docs] + def readDatabase(self) -> None: + """ + read database from input file and store into database and size of each transaction. + """ + numberOfTransaction = 0 + self.creatingItemSets() + for k in range(len(self.Database)): + numberOfTransaction += 1 + transaction = self.Database[k] + utilities = self.utilityValues[k] + self.database[numberOfTransaction] = transaction + for i in range(len(transaction)): + self.utility[transaction[i]] = self.utility.get(transaction[i],0) + self.utility[transaction[i]] += utilities[i] + self.lengthList = [len(s) for s in self.database.values()] + self.utility = {k: v for k, v in sorted(self.utility.items(), key=lambda x:x[1], reverse=True)}
+ + +
+[docs] + def getDatabaseSize(self) -> int: + """ + get the size of database + :return: size of database after reading from database + :rtype: int + """ + return len(self.database)
+ + +
+[docs] + def getTotalNumberOfItems(self) -> int: + """ + get the number of items in database. + :return: number of items + :rtype: int + """ + return len(self.getSortedListOfItemFrequencies())
+ + +
+[docs] + def getMinimumTransactionLength(self) -> int: + """ + get the minimum transaction length + :return: minimum transaction length + :rtype: int + """ + return min(self.lengthList)
+ + +
+[docs] + def getAverageTransactionLength(self) -> float: + """ + get the average transaction length. It is sum of all transaction length divided by database length. + :return: average transaction length + :rtype: float + """ + totalLength = sum(self.lengthList) + return totalLength / len(self.database)
+ + +
+[docs] + def getMaximumTransactionLength(self) -> int: + """ + get the maximum transaction length + :return: maximum transaction length + :rtype: int + """ + return max(self.lengthList)
+ + +
+[docs] + def getStandardDeviationTransactionLength(self) -> float: + """ + get the standard deviation transaction length + :return: standard deviation transaction length + :rtype: float + """ + return statistics.pstdev(self.lengthList)
+ + +
+[docs] + def getVarianceTransactionLength(self) -> float: + """ + get the variance transaction length + :return: variance transaction length + :rtype: float + """ + return statistics.variance(self.lengthList)
+ + +
+[docs] + def getNumberOfItems(self) -> int: + """ + get the number of items in database. + :return: number of items + :rtype: int + """ + return len(self.getSortedListOfItemFrequencies())
+ + +
+[docs] + def getSparsity(self) -> float: + """ + get the sparsity of database, i.e. the fraction of empty cells in the transaction x item matrix + :return: sparsity of database after reading from database + :rtype: float + """ + matrixSize = self.getDatabaseSize()*len(self.getSortedListOfItemFrequencies()) + return (matrixSize - sum(self.getSortedListOfItemFrequencies().values())) / matrixSize
+ + +
+[docs] + def getSortedListOfItemFrequencies(self) -> dict: + """ + get sorted list of item frequencies + :return: item frequencies + :rtype: dict + """ + itemFrequencies = {} + for tid in self.database: + for item in self.database[tid]: + itemFrequencies[item] = itemFrequencies.get(item, 0) + itemFrequencies[item] += 1 + return {k: v for k, v in sorted(itemFrequencies.items(), key=lambda x:x[1], reverse=True)}
+ + +
+[docs] + def getFrequenciesInRange(self) -> dict: + """ + get the number of items falling into each frequency range + :return: mapping from the upper bound of each range to the number of items in that range + :rtype: dict + """ + fre = self.getSortedListOfItemFrequencies() + rangeFrequencies = {} + maximum = max([i for i in fre.values()]) + values = [int(i*maximum/6) for i in range(1,6)] + va = len({key: val for key, val in fre.items() if val > 0 and val < values[0]}) + rangeFrequencies[values[0]] = va + for i in range(1,len(values)): + va = len({key: val for key, val in fre.items() if val < values[i] and val > values[i-1]}) + rangeFrequencies[values[i]] = va + return rangeFrequencies
+ + +
+[docs] + def getTransanctionalLengthDistribution(self) -> dict: + """ + get transaction length + :return: Transanctional Length Distribution + :rtype: dict + """ + transactionLength = {} + for length in self.lengthList: + transactionLength[length] = transactionLength.get(length, 0) + transactionLength[length] += 1 + return {k: v for k, v in sorted(transactionLength.items(), key=lambda x:x[0])}
+ + +
+[docs] + def save(self, data, outputFile) -> None: + """ + store data into outputFile + :param data: input data + :type data: dict + :param outputFile: output file name or path to store + :type outputFile: str + :return: None + """ + with open(outputFile, 'w') as f: + for key, value in data.items(): + f.write(f'{key}\t{value}\n')
+ + +
+[docs] + def getTotalUtility(self) -> int: + """ + get sum of utility + :return: total utility + :rtype: int + """ + return sum(list(self.utility.values()))
+ + +
+[docs] + def getMinimumUtility(self) -> int: + """ + get the minimum utility + :return: minimum utility + :rtype: int + """ + return min(list(self.utility.values()))
+ + +
+[docs] + def getAverageUtility(self) -> float: + """ + get the average utility + :return: average utility + :rtype: float + """ + return sum(list(self.utility.values())) / len(self.utility)
+ + +
+[docs] + def getMaximumUtility(self) -> int: + """ + get the maximum utility + :return: maximum utility + :rtype: int + """ + return max(list(self.utility.values()))
+ + +
+[docs] + def getSortedUtilityValuesOfItem(self) -> dict: + """ + get sorted utility value each item. key is item and value is utility of item + :return: sorted dictionary utility value of item + :rtype: dict + """ + return self.utility
+ + +
+[docs] + def printStats(self) -> None: + + """ + This function is used to print the results + """ + print(f'Database size : {self.getDatabaseSize()}') + print(f'Number of items : {self.getTotalNumberOfItems()}') + print(f'Minimum Transaction Size : {self.getMinimumTransactionLength()}') + print(f'Average Transaction Size : {self.getAverageTransactionLength()}') + print(f'Maximum Transaction Size : {self.getMaximumTransactionLength()}') + print(f'Minimum utility : {self.getMinimumUtility()}') + print(f'Average utility : {self.getAverageUtility()}') + print(f'Maximum utility : {self.getMaximumUtility()}') + print(f'Standard Deviation Transaction Size : {self.getStandardDeviationTransactionLength()}') + print(f'Variance : {self.getVarianceTransactionLength()}') + print(f'Sparsity : {self.getSparsity()}')
+ + + +
+[docs] + def plotGraphs(self) -> None: + """ + plot the item frequency and transaction length distributions + """ + itemFrequencies = self.getFrequenciesInRange() + transactionLength = self.getTransanctionalLengthDistribution() + plt.plotLineGraphFromDictionary(itemFrequencies, 100, 0, 'Frequency', 'no of items', 'frequency') + plt.plotLineGraphFromDictionary(transactionLength, 100, 0, 'transaction length', 'transaction length', 'frequency')
+
+ + + +if __name__ == '__main__': + import PAMI.extras.graph.plotLineGraphFromDictionary as plt + + try: + if len(sys.argv) != 3: + raise ValueError("Missing some of the input parameters. Format: python3 utilityDatabase.py <fileName> <separator>") + + iFile, separator = sys.argv[1], sys.argv[2] + obj = utilityDatabase(iFile, separator) + obj.run() + if obj.getDatabaseSize() > 0: + obj.printStats() + obj.plotGraphs() + else: + print("No data found in the database.") + + except ValueError as ve: + print(f"ValueError: {ve}") +
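+
+# --- Illustrative usage sketch (added; not part of the original module) ---
+# A minimal demo assuming the 'items:totalUtility:utilities' line format
+# parsed by creatingItemSets(), with tab-separated items and one utility
+# value per item. 'sampleUtilDB.txt' is only an example file name.
+def _demoUtilityStats() -> None:
+    with open('sampleUtilDB.txt', 'w') as f:
+        f.write('a\tb\tc:10:4\t3\t3\n')
+        f.write('a\tc:7:5\t2\n')
+    obj = utilityDatabase('sampleUtilDB.txt', '\t')
+    obj.run()
+    obj.printStats()  # total utility here is 17; item a accumulates 4 + 5 = 9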
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/TemporalDatabase.html b/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/TemporalDatabase.html new file mode 100644 index 000000000..dbb0994c7 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/TemporalDatabase.html @@ -0,0 +1,344 @@ + + + + + + PAMI.extras.syntheticDataGenerator.TemporalDatabase — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +

Source code for PAMI.extras.syntheticDataGenerator.TemporalDatabase

+# TemporalDatabase generates a synthetic temporal database, i.e. a collection of transactions, each tagged with a timestamp.
+#
+#  **Importing this algorithm into a python program**
+#  --------------------------------------------------------
+#
+#             from PAMI.extras.syntheticDataGenerator import TemporalDatabase as db
+#
+#             temporalDB = db.TemporalDatabase(numOfTransactions, avgTransactionLength, numItems, outFileName)
+#
+#             temporalDB.create()
+#
+#
+
+
+__copyright__ = """
+ Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+
+import pandas as pd
+import numpy as np
+import sys
+
+
+
+[docs] +class TemporalDatabase: + """ + :Description: creates a temporal database with the required parameters (e.g., numOfTransactions, avgLenOfTransactions, numItems and outputFile). + The output is written either to a text file or kept as a dataframe, depending on the type of file specified. + + :Attributes: + + :param numOfTransactions: int + number of transactions + + :param avgLenOfTransactions: int + average length of transactions + + :param numItems: int + number of items + + :param outputFile: str + the name of the output file + + :param percentage: int + percentage used in the coin toss that selects the TIDs of the temporal database + + :param sep: str + separator for the database output file + + :param typeOfFile: str + specify 'database' or 'dataframe' to get the corresponding output + + :Methods: + getFileName(): + returns the output file name + + getDatabaseAsDataFrame(): + returns the generated dataframe + + performCoinFlip(): + perform a coin flip with the given probability + + tuning(): + tune the transaction lengths to match avgLenOfTransactions + + create(): + create the temporal database or dataframe, depending on the input + + **Methods to execute code on terminal** + --------------------------------------------- + + .. code-block:: console + + Format: + + (.venv) $ python3 TemporalDatabase.py <numOfTransactions> <avgLenOfTransactions> <numItems> <outputFile> + + Example Usage: + + (.venv) $ python3 TemporalDatabase.py 50 10 100 temporal.txt + + **Importing this algorithm into a python program** + -------------------------------------------------------- + .. code-block:: python + + from PAMI.extras.syntheticDataGenerator import TemporalDatabase as db + + temporalDB = db.TemporalDatabase(numOfTransactions, avgTransactionLength, numItems, outFileName) + + temporalDB.create() + + + """ + def __init__(self, numOfTransactions: int, avgLenOfTransactions: int, + numItems: int, outputFile: str, percentage: int=50, + sep: str='\t', typeOfFile: str="Database") -> None: + + """ + Initialize the TemporalDatabase class with the required parameters. + """ + + self.numOfTransactions = numOfTransactions + self.avgLenOfTransactions = avgLenOfTransactions + self.numItems = numItems + self.outputFile = outputFile + if percentage > 1: + self.percentage = percentage / 100 + else: + self.percentage = percentage + self.sep = sep + self.typeOfFile = typeOfFile.lower() + +
+[docs] + def getFileName(self) -> str: + """ + returns the name of the output file + :return: output file name + :rtype: str + """ + return self.outputFile
+ + +
+[docs] + def getDatabaseAsDataFrame(self) -> pd.DataFrame: + """ + returns the generated database as a dataframe + :return: the database as a pandas dataframe + :rtype: pd.DataFrame + """ + return self.df
+ + +
+[docs] + def performCoinFlip(self, probability: float) -> bool: + """Perform a coin flip with the given probability.""" + result = np.random.choice([0, 1], p=[1 - probability, probability]) + return result == 1
+ + + +
+[docs] + def tuning(self, array, sumRes) -> list: + """ + Tune the array so that the sum of the values is equal to sumRes + + :param array: list of (line number, length) pairs whose lengths are tuned + :type array: list + :param sumRes: target sum + :type sumRes: int + + :return: tuned array + :rtype: list + """ + # draw a fresh random length for every selected line, then nudge the + # values until they sum exactly to sumRes + values = np.random.randint(1, self.numItems, len(array)) + + while np.sum(values) != sumRes: + # if the sum is too large, decrease the largest value + if np.sum(values) > sumRes: + maxIndex = np.argmax(values) + values[maxIndex] -= 1 + # if the sum is too small, increase the smallest value + else: + minIndex = np.argmin(values) + values[minIndex] += 1 + + # write the tuned lengths back into the array + for i in range(len(array)): + array[i][1] = values[i] + + return array
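+ # Worked example (added for clarity): if the coin flips selected 4 lines and
+ # sumRes = 20, a draw of [3, 9, 2, 4] (sum 18) is nudged upwards at the
+ # smallest entries until the sum reaches 20, e.g. [4, 9, 3, 4], and these
+ # lengths are then written back into the (line, length) pairs.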
+ + +
+[docs] + def create(self) -> None: + """ + create the temporal database or dataframe, depending on the type of file specified. + :return: None + """ + + db = [] + lineSize = [] + for i in range(self.numOfTransactions): + db.append([i]) + if self.performCoinFlip(self.percentage): + lineSize.append([i,0]) + + # tune lineSize so that the sum of the lengths equals numOfTransactions * avgLenOfTransactions + sumRes = self.numOfTransactions * self.avgLenOfTransactions + self.tuning(lineSize, sumRes) + + for i in range(len(lineSize)): + if lineSize[i][1] > self.numItems: + raise ValueError("Error: Either increase numItems or decrease avgLenOfTransactions or modify percentage") + line = np.random.choice(range(1, self.numItems + 1), lineSize[i][1], replace=False) + db[lineSize[i][0]].extend(line) + + if self.typeOfFile == "database": + with open(self.outputFile, "w") as outFile: + for line in db: + outFile.write(self.sep.join(map(str, line)) + '\n') + + if self.typeOfFile == "dataframe": + data = { + 'timestamp': [line[0] for line in db], + 'transactions': pd.Series([line[1:] for line in db]) + } + self.df = pd.DataFrame(data) + + print("Temporal database created successfully")
+
+ + +if __name__ == '__main__': + + if len(sys.argv) == 5: + obj = TemporalDatabase(int(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3]), sys.argv[4]) + obj.create() + else: + print("Error! The number of input parameters does not match the total number of parameters provided") +
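+
+# --- Illustrative usage sketch (added; not part of the original module) ---
+# A minimal programmatic run: 100 timestamped transactions with an average
+# length of 5 over 50 items. 'temporal_demo.txt' is only an example name;
+# with typeOfFile='dataframe' the result is kept in memory instead.
+def _demoTemporalGenerator() -> None:
+    dbGen = TemporalDatabase(100, 5, 50, 'temporal_demo.txt')
+    dbGen.create()
+    dfGen = TemporalDatabase(100, 5, 50, 'unused.txt', typeOfFile='dataframe')
+    dfGen.create()
+    print(dfGen.getDatabaseAsDataFrame().head())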
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/TransactionalDatabase.html b/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/TransactionalDatabase.html new file mode 100644 index 000000000..96d62f0cf --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/TransactionalDatabase.html @@ -0,0 +1,337 @@ + + + + + + PAMI.extras.syntheticDataGenerator.TransactionalDatabase — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +

Source code for PAMI.extras.syntheticDataGenerator.TransactionalDatabase

+# TransactionalDatabase is a collection of transactions. It only considers the data in  transactions and ignores the metadata.
+#
+#  **Importing this algorithm into a python program**
+#  --------------------------------------------------------
+#     from PAMI.extras.syntheticDataGenerator import TransactionalDatabase as db
+#
+#     obj = db.TransactionalDatabase(10, 5, 10)
+#
+#     obj.create()
+#
+#     obj.save('db.txt')
+#
+#     print(obj.getTransactions())
+#
+
+import numpy as np
+import pandas as pd
+import sys
+
+
+__copyright__ = """
+ Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+
+[docs] +class TransactionalDatabase: + """ + :Description: TransactionalDatabase is a collection of transactions. It only considers the data in transactions and ignores the metadata. + :Attributes: + + numLines: int + Number of lines + avgItemsPerLine: int + Average number of items per line + numItems: int + Total number of items + + :Methods: + + create: + Generate the transactional database + save: + Save the transactional database to a user-specified file + getTransactions: + Get the transactional database + + + **Methods to execute code on terminal** + --------------------------------------------- + + .. code-block:: console + + Format: + + (.venv) $ python3 TransactionalDatabase.py <numLines> <avgItemsPerLine> <numItems> <outputFile> + + Example Usage: + + (.venv) $ python3 TransactionalDatabase.py 50 10 100 db.txt + + + + **Importing this algorithm into a python program** + -------------------------------------------------------- + .. code-block:: python + + from PAMI.extras.syntheticDataGenerator import TransactionalDatabase as db + + obj = db.TransactionalDatabase(10, 5, 10) + + obj.create() + + obj.save('db.txt') + + print(obj.getTransactions()) + + + """ + + def __init__(self, numLines, avgItemsPerLine, numItems) -> None: + """ + Initialize the transactional database with the given parameters + + :param numLines: number of lines + :type numLines: int + :param avgItemsPerLine: average number of items per line + :type avgItemsPerLine: int + :param numItems: total number of items + :type numItems: int + """ + + self.numLines = numLines + self.avgItemsPerLine = avgItemsPerLine + self.numItems = numItems + self.db = [] + +
+[docs] + def tuning(self, array, sumRes) -> list: + """ + Tune the array so that the sum of the values is equal to sumRes + + :param array: list of values + :type array: list + :param sumRes: target sum + :type sumRes: int + + :return: tuned array + :rtype: list + """ + + while np.sum(array) != sumRes: + # pick a random position to adjust + randIndex = np.random.randint(0, len(array)) + # if the sum is too large, decrease the value at that position + if np.sum(array) > sumRes: + array[randIndex] -= 1 + # if the sum is too small, increase the value at that position + else: + array[randIndex] += 1 + return array
+ + + +
+[docs] + def generateArray(self, nums, avg, maxItems) -> list: + """ + Generate a random array of length n whose values average to m + + :param nums: number of values + :type nums: int + :param avg: average value + :type avg: int + :param maxItems: maximum value + :type maxItems: int + + Returns: + values: list - random array + """ + + # generate n random values + values = np.random.randint(1, maxItems, nums) + + sumRes = nums * avg + + self.tuning(values, sumRes) + + # if any value is less than 1, increase it and tune the array again + while np.any(values < 1): + for i in range(nums): + if values[i] < 1: + values[i] += 1 + self.tuning(values, sumRes) + + while np.any(values > maxItems): + for i in range(nums): + if values[i] > maxItems: + values[i] -= 1 + self.tuning(values, sumRes) + + + # if all values are same then randomly increase one value and decrease another + while np.all(values == values[0]): + values[np.random.randint(0, nums)] += 1 + self.tuning(values, sumRes) + + return values
+ + +
+[docs] + def create(self) -> None: + """ + Generate the transactional database with the given input parameters. + :return: None + """ + values = self.generateArray(self.numLines, self.avgItemsPerLine, self.numItems) + + for value in values: + line = np.random.choice(range(1, self.numItems + 1), value, replace=False) + self.db.append(line)
+ + +
+[docs] + def save(self, filename) -> None: + """ + Save the transactional database to a file + + :param filename: name of the file + :type filename: str + """ + + with open(filename, 'w') as f: + for line in self.db: + f.write(','.join(map(str, line)) + '\n')
+ + +
+[docs] + def getTransactions(self) -> pd.DataFrame: + """ + Get the transactional database in dataFrame format + + Returns: + db: pd.dataFrame - transactional database + """ + df = pd.DataFrame(self.db) + return df
+
+ + + +if __name__ == "__main__": + # test the class + obj = TransactionalDatabase(int(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3])) + obj.create() + obj.save(sys.argv[4]) + # print(obj.getTransactions()) +
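+
+# --- Illustrative usage sketch (added; not part of the original module) ---
+# A minimal programmatic run mirroring the __main__ block above: 10 lines
+# with 5 items per line on average, drawn from 20 distinct items;
+# 'demo_db.txt' is only an example file name.
+def _demoTransactionalGenerator() -> None:
+    gen = TransactionalDatabase(10, 5, 20)
+    gen.create()
+    gen.save('demo_db.txt')
+    print(gen.getTransactions().head())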
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/createSyntheticGeoreferentialTemporal.html b/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/createSyntheticGeoreferentialTemporal.html new file mode 100644 index 000000000..2119c8338 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/createSyntheticGeoreferentialTemporal.html @@ -0,0 +1,176 @@ + + + + + + PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialTemporal — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +

Source code for PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialTemporal

+import random as _rd
+import sys as _sys
+
+
+
+[docs] +class createGeoreferentialTemporalDatabase: + """ + This class creates a synthetic geo-referential temporal database. + + :Attribute: + + totalTransactions : int + No of transactions + noOfItems : int or float + No of items + avgTransactionLength : int + The length of average transaction + outputFile: str + Name of the output file. + + :Methods: + + createGeoreferentialTemporalDatabase(outputFile) + Create a geo-referential temporal database and store it into outputFile + + **Credits:** + --------------- + The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran. + + """ + + def __init__(self, transactions: int, items: int, avgTransaction: int) -> None: + self._totalTransactions = transactions + self._noOfItems = items + self._avgTransactionLength = avgTransaction + +
+[docs] + def createGeoreferentialTemporalDatabase(self, outputFile: str) -> None: + """ + create a geo-referential temporal database and store it into outputFile + + :param outputFile: file name or path to store database + :type outputFile: str + :return: None + """ + writer = open(outputFile, 'w+') + items = [] + count = 1 + for i in range(self._noOfItems): + lat = _rd.randint(1, self._noOfItems) + lon = _rd.randint(1, self._noOfItems) + if lat == lon: + lon = _rd.randint(1, self._noOfItems) + stt = '(' + str(lat) + ' ' + str(lon) + ')' + items.append(stt) + for i in range(self._totalTransactions): + length = _rd.randint(1, self._avgTransactionLength + 20) + st = str(count) + for i in range(length): + rd = _rd.randint(0, len(items) - 1) + item = items[rd] + st = st + str(item) + '\t' + writer.write("%s \n" % st) + count += 1 + writer.close()
+
+ + +if __name__ == "__main__": + _ap = createGeoreferentialTemporalDatabase(100000, 870, 10) + _ap.createGeoreferentialTemporalDatabase("T10_geo_temp.txt") +
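+# Example output line (added for clarity): each transaction starts with its
+# running count and is immediately followed by tab-separated '(lat lon)'
+# items, e.g.  1(12 40)\t(3 27)\t(55 9)\t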
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/createSyntheticGeoreferentialTransactions.html b/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/createSyntheticGeoreferentialTransactions.html new file mode 100644 index 000000000..d7887a907 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/createSyntheticGeoreferentialTransactions.html @@ -0,0 +1,175 @@ + + + + + + PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialTransactions — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +

Source code for PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialTransactions

+import random as _rd
+import sys as _sys
+
+
+
+[docs] +class createSyntheticGeoreferentialTransaction: + """ + This class creates a synthetic geo-referential transaction database. + + :Attribute: + + totalTransactions : int + No of transactions + items : int + No of items + avgTransactionLength : int + The length of average transaction + outputFile: str + Name of the output file. + + :Methods: + + createGeoreferentialTransactionalDatabase(outputFile) + Create a geo-referential transactional database and store it into outputFile + + + **Credits:** + -------------- + The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran. + + """ + + def __init__(self, transactions, items, avgTransaction): + self._totalTransactions = transactions + self._noOfItems = items + self._avgTransactionLength = avgTransaction + +
+[docs] + def createGeoreferentialTransactionalDatabase(self, outputFile): + """ + create a geo-referential transactional database and store it into outputFile + + :param outputFile: file name or path to store database + :type outputFile: str + :return: None + """ + writer = open(outputFile, 'w+') + items = [] + for i in range(self._noOfItems): + lat = _rd.randint(1, self._noOfItems) + lon = _rd.randint(1, self._noOfItems) + if lat == lon: + lon = _rd.randint(1, self._noOfItems) + stt = '(' + str(lat) + ' ' + str(lon) + ')' + items.append(stt) + for i in range(self._totalTransactions): + length = _rd.randint(1, self._avgTransactionLength + 20) + st = str() + for i in range(length): + rd = _rd.randint(0, len(items) - 1) + item = items[rd] + st = st + str(item) + '\t' + writer.write("%s \n" % st) + writer.close()
+
+ + +if __name__ == "__main__": + _ap = createSyntheticGeoreferentialTransaction(100000, 870, 10) + _ap.createGeoreferentialTransactionalDatabase("T10_geo.txt") +
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/createSyntheticGeoreferentialUncertainTransaction.html b/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/createSyntheticGeoreferentialUncertainTransaction.html new file mode 100644 index 000000000..1b6d36266 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/createSyntheticGeoreferentialUncertainTransaction.html @@ -0,0 +1,179 @@ + + + + + + PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialUncertainTransaction — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +

Source code for PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialUncertainTransaction

+import random as _rd
+import sys as _sys
+
+
+
+[docs] +class createSyntheticGeoreferentialUncertainTransaction: + """ + This class creates a synthetic geo-referential uncertain transaction database. + + :Attribute: + + totalTransactions : int + No of transactions + noOfItems : int + No of items + avgTransactionLength : int + The length of average transaction + outputFile: str + Name of the output file. + + :Methods: + + createGeoreferentialUncertainTransactionalDatabase(outputFile) + Create a geo-referential uncertain transactional database and store it into outputFile + + **Credits:** + -------------- + The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran. + + """ + + def __init__(self, transactions: int, items: int, avgTransaction: int) -> None: + self._totalTransactions = transactions + self._noOfItems = items + self._avgTransactionLength = avgTransaction + +
+[docs] + def createGeoreferentialUncertainTransactionalDatabase(self, outputFile: str) -> None: + """ + create a geo-referential uncertain transactional database and store it into outputFile + + :param outputFile: file name or path to store database + :type outputFile: str + :return: None + """ + writer = open(outputFile, 'w+') + items = [] + for i in range(self._noOfItems): + lat = _rd.randint(1, self._noOfItems) + lon = _rd.randint(1, self._noOfItems) + if lat == lon: + lon = _rd.randint(1, self._noOfItems) + stt = '(' + str(lat) + ' ' + str(lon) + ')' + items.append(stt) + for i in range(self._totalTransactions): + length = _rd.randint(1, self._avgTransactionLength + 20) + st = str() + st1 = str() + for i in range(length): + rd = _rd.randint(0, len(items) - 1) + item = items[rd] + probability = _rd.uniform(0, 1) + st = st + str(item) + '\t' + st1 = st1 + str(probability) + '\t' + writer.write("%s" % st) + writer.write(":") + writer.write("%s \n" % st1) + writer.close()
+
+ + +if __name__ == "__main__": + _ap = createSyntheticGeoreferentialUncertainTransaction(100000, 870, 10) + _ap.createGeoreferentialUncertainTransactionalDatabase("T10_geo_un.txt") +
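+# Example output line (added for clarity): tab-separated '(lat lon)' items,
+# then ':', then one uncertainty value per item, e.g.
+# (12 40)\t(3 27)\t:0.41\t0.87\t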
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/createSyntheticTemporal.html b/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/createSyntheticTemporal.html new file mode 100644 index 000000000..f4ac0dd31 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/createSyntheticTemporal.html @@ -0,0 +1,165 @@ + + + + + + PAMI.extras.syntheticDataGenerator.createSyntheticTemporal — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +

Source code for PAMI.extras.syntheticDataGenerator.createSyntheticTemporal

+import random as _rd
+import sys as _sys
+
+[docs] +class createSyntheticTemporal: + """ + This class creates a synthetic temporal database. + + :Attribute: + + totalTransactions : int + No of transactions + noOfItems : int + No of items + avgTransactionLength : int + The length of average transaction + outputFile: str + Name of the output file. + + :Methods: + + createTemporalDatabase(outputFile) + Create a temporal database and store it into outputFile + + **Credits:** + --------------- + The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran. + + """ + + def __init__(self, transactions: int, items: int, avgTransaction: int) -> None: + self._totalTransactions = transactions + self._noOfItems = items + self._avgTransactionLength = avgTransaction + +
+[docs] + def createTemporalDatabase(self, outputFile: str) -> None: + """ + create a temporal database and store it into outputFile + + :param outputFile: file name or path to store database + :type outputFile: str + :return: None + """ + count = 1 + writer = open(outputFile, 'w+') + for i in range(self._totalTransactions): + length = _rd.randint(1, self._avgTransactionLength + 20) + st = str(count) + '\t' + for i in range(length): + item = _rd.randint(1, self._noOfItems) + st = st + str(item) + '\t' + writer.write("%s \n" % st) + count += 1 + writer.close()
+
+
+
+if __name__ == "__main__":
+    _ap = createSyntheticTemporal(100000, 870, 10)
+    _ap.createTemporalDatabase("temporal_T10.txt")
+
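+
+# A hedged usage sketch (sizes and file name are illustrative): the first
+# tab-separated field of every generated line is a sequential timestamp
+# (1, 2, 3, ...); the remaining fields are item ids, e.g. "42\t7\t19\t3".
+if __name__ == "__main__":
+    _demo = createSyntheticTemporal(1000, 100, 8)
+    _demo.createTemporalDatabase("temporal_demo.txt")
+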
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/createSyntheticTransactions.html b/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/createSyntheticTransactions.html new file mode 100644 index 000000000..5546d1e5c --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/createSyntheticTransactions.html @@ -0,0 +1,163 @@ + + + + + + PAMI.extras.syntheticDataGenerator.createSyntheticTransactions — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.syntheticDataGenerator.createSyntheticTransactions

+import random as _rd
+import sys as _sys
+
+[docs]
+class createSyntheticTransaction:
+    """
+    This class creates a synthetic transactional database.
+
+    :Attribute:
+
+        totalTransactions : int
+            Number of transactions
+        noOfItems : int
+            Number of items
+        avgTransactionLength : int
+            Average transaction length
+        outputFile: str
+            Name of the output file.
+
+    :Methods:
+
+        createTransactionalDatabase(outputFile)
+            Creates a transactional database and stores it in outputFile
+
+    **Credits:**
+    ---------------
+    The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.
+
+    """
+
+    def __init__(self, totalTransactions: int, items: int, avgTransactionLength: int) -> None:
+        self._totalTransactions = totalTransactions
+        self._noOfItems = items
+        self._avgTransactionLength = avgTransactionLength
+
+[docs]
+    def createTransactionalDatabase(self, outputFile: str) -> None:
+        """
+        Create a transactional database and store it in outputFile
+
+        :param outputFile: file name or path to store the database
+        :type outputFile: str
+        :return: None
+        """
+        writer = open(outputFile, 'w+')
+        for i in range(self._totalTransactions):
+            length = _rd.randint(1, self._avgTransactionLength + 20)
+            st = str()
+            for i in range(length):
+                item = _rd.randint(1, self._noOfItems)
+                st = st + str(item) + '\t'
+            writer.write("%s \n" % st)
+        writer.close()
+
+
+
+if __name__ == "__main__":
+    _ap = createSyntheticTransaction(100000, 870, 10)
+    _ap.createTransactionalDatabase("T10.txt")
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/createSyntheticUncertainTemporal.html b/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/createSyntheticUncertainTemporal.html new file mode 100644 index 000000000..301b8ea2b --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/createSyntheticUncertainTemporal.html @@ -0,0 +1,172 @@ + + + + + + PAMI.extras.syntheticDataGenerator.createSyntheticUncertainTemporal — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.syntheticDataGenerator.createSyntheticUncertainTemporal

+import random as _rd
+import sys as _sys
+
+
+
+[docs]
+class createSyntheticUncertainTemporal:
+    """
+    This class creates a synthetic uncertain temporal database.
+
+    :Attribute:
+
+        totalTransactions : int
+            Total number of transactions
+        noOfItems : int
+            Number of items
+        avgTransactionLength : int
+            Average transaction length
+        outputFile: str
+            Name of the output file.
+
+    :Methods:
+
+        createUncertainTemporalDatabase(outputFile)
+            Creates an uncertain temporal database and stores it in outputFile
+
+    **Credits:**
+    ---------------
+    The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.
+
+    """
+
+    def __init__(self, totalTransactions: int, items: int, avgTransaction: int) -> None:
+        self._totalTransactions = totalTransactions
+        self._noOfItems = items
+        self._avgTransactionLength = avgTransaction
+
+[docs]
+    def createUncertainTemporalDatabase(self, outputFile: str) -> None:
+        """
+        Create an uncertain temporal database and store it in outputFile
+
+        :param outputFile: file name or path to store the database
+        :type outputFile: str
+        :return: None
+        """
+        writer = open(outputFile, 'w+')
+        count = 1
+        for i in range(self._totalTransactions):
+            length = _rd.randint(1, self._avgTransactionLength + 20)
+            st = str(count) + '\t'
+            st1 = str()
+            for i in range(length):
+                item = _rd.randint(1, self._noOfItems)
+                probability = _rd.uniform(0, 1)
+                st = st + str(item) + '\t'
+                st1 = st1 + str(probability) + '\t'
+            writer.write("%s:" % st)
+            writer.write("%s \n" % st1)
+            count += 1
+        writer.close()
+
+
+
+
+if __name__ == "__main__":
+    _ap = createSyntheticUncertainTemporal(50000, 870, 10)
+    _ap.createUncertainTemporalDatabase("T10_uncertain_temp.txt")
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/createSyntheticUncertainTransactions.html b/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/createSyntheticUncertainTransactions.html new file mode 100644 index 000000000..fb0d4575c --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/createSyntheticUncertainTransactions.html @@ -0,0 +1,170 @@ + + + + + + PAMI.extras.syntheticDataGenerator.createSyntheticUncertainTransactions — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.syntheticDataGenerator.createSyntheticUncertainTransactions

+import random as _rd
+import sys as _sys
+
+
+
+[docs]
+class createSyntheticUncertainTransaction:
+    """
+    This class creates a synthetic uncertain transactional database.
+
+    :Attribute:
+
+        totalTransactions : int
+            Number of transactions
+        noOfItems : int
+            Number of items
+        avgTransactionLength : int
+            Average transaction length
+        outputFile: str
+            Name of the output file.
+
+    :Methods:
+
+        createUncertainTransactionalDatabase(outputFile)
+            Creates an uncertain transactional database and stores it in outputFile
+
+    **Credits:**
+    -------------
+    The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.
+
+    """
+
+    def __init__(self, transactions: int, items: int, avgTransaction: int) -> None:
+        self._totalTransactions = transactions
+        self._noOfItems = items
+        self._avgTransactionLength = avgTransaction
+
+[docs]
+    def createUncertainTransactionalDatabase(self, outputFile: str) -> None:
+        """
+        Create an uncertain transactional database and store it in outputFile
+
+        :param outputFile: file name or path to store the database
+        :type outputFile: str
+        :return: None
+        """
+        writer = open(outputFile, 'w+')
+        for i in range(self._totalTransactions):
+            length = _rd.randint(1, self._avgTransactionLength + 20)
+            st = str()
+            st1 = str()
+            for i in range(length):
+                item = _rd.randint(1, self._noOfItems)
+                probability = _rd.uniform(0, 1)
+                st = st + str(item) + '\t'
+                st1 = st1 + str(probability) + '\t'
+            writer.write("%s:" % st)
+            writer.write("%s \n" % st1)
+        writer.close()
+
+
+
+
+if __name__ == "__main__":
+    _ap = createSyntheticUncertainTransaction(100000, 870, 10)
+    _ap.createUncertainTransactionalDatabase("T10_uncertain.txt")
+
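+
+# A hedged usage sketch (sizes and file name are illustrative): each generated
+# line is "<items>:<probabilities>", items and probabilities tab-separated and
+# aligned by position, e.g. "7\t19\t:0.64\t0.21".
+if __name__ == "__main__":
+    _demo = createSyntheticUncertainTransaction(1000, 100, 8)
+    _demo.createUncertainTransactionalDatabase("uncertain_demo.txt")
+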
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/createSyntheticUtility.html b/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/createSyntheticUtility.html new file mode 100644 index 000000000..c2fcf2296 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/createSyntheticUtility.html @@ -0,0 +1,177 @@ + + + + + + PAMI.extras.syntheticDataGenerator.createSyntheticUtility — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.syntheticDataGenerator.createSyntheticUtility

+import random as _rd
+import sys as _sys
+
+
+
+[docs]
+class createSyntheticUtility:
+    """
+
+    This class creates a synthetic utility database.
+
+    :Attribute:
+
+        totalTransactions : int
+            Number of transactions
+        noOfItems : int
+            Number of items
+        maxUtilRange: int
+            Maximum utility range
+        avgTransactionLength : int
+            Average transaction length
+        outputFile: str
+            Name of the output file.
+
+    :Methods:
+
+        createUtilityDatabase(outputFile)
+            Creates a utility database and stores it in outputFile
+
+    **Credits:**
+    ---------------
+    The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.
+
+    """
+
+    def __init__(self, transactions: int, items: int, maxUtilRange: int, avgTransaction: int) -> None:
+        self._totalTransactions = transactions
+        self._noOfItems = items
+        self._maxUtilRange = maxUtilRange
+        self._avgTransactionLength = avgTransaction
+
+[docs]
+    def createUtilityDatabase(self, outputFile: str) -> None:
+        """
+        Create a utility database and store it in outputFile
+
+        :param outputFile: file name or path to store the database
+        :type outputFile: str
+        :return: None
+        """
+        writer = open(outputFile, 'w+')
+        for i in range(self._totalTransactions):
+            length = _rd.randint(1, self._avgTransactionLength + 20)
+            st = str()
+            st1 = str()
+            su = []
+            for i in range(length):
+                item = _rd.randint(1, self._noOfItems)
+                utility = _rd.randint(1, self._maxUtilRange)
+                st = st + str(item) + '\t'
+                su.append(utility)
+                st1 = st1 + str(utility) + '\t'
+            summation = sum(su)
+            st = st + ":" + str(summation) + ":"
+            writer.write("%s" % st)
+            writer.write("%s \n" % st1)
+        writer.close()
+
+
+
+if __name__ == "__main__":
+    _ap = createSyntheticUtility(100000, 870, 100, 10)
+    _ap.createUtilityDatabase("T10_util.txt")
+
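+
+# A hedged worked example of the line format (values illustrative): if a
+# transaction holds items 4 and 9 with utilities 30 and 70, the written line is
+# "4\t9\t:100:30\t70" -- items, ':', their utility total, ':', per-item utilities.
+if __name__ == "__main__":
+    _demo = createSyntheticUtility(1000, 100, 50, 8)
+    _demo.createUtilityDatabase("utility_demo.txt")
+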
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/generateTemporal.html b/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/generateTemporal.html new file mode 100644 index 000000000..3ed237bc6 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/generateTemporal.html @@ -0,0 +1,143 @@ + + + + + + PAMI.extras.syntheticDataGenerator.generateTemporal — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.syntheticDataGenerator.generateTemporal

+import random
+
+
+
+[docs]
+class generateTemporal:
+    """
+    Generates a synthetic temporal database; transaction lengths are drawn
+    uniformly from [1, 2 * avgTransactionLength].
+    """
+
+    __transactionSize: int
+    __numOfItems: int
+    __avgTransactionLength: int
+    __transactions: list[list[int]]
+
+    def __init__(self, transactionSize: int, numOfItems: int, avgTransactionLength: int) -> None:
+        self.__transactionSize = transactionSize
+        self.__numOfItems = numOfItems
+        self.__avgTransactionLength = avgTransactionLength
+
+        self.__transactions = list()
+
+[docs] + def generate(self) -> None: + for tid in range(self.__transactionSize): + length = random.randint(1, self.__avgTransactionLength * 2) + transaction = [random.randint(1, self.__numOfItems) + for _ in range(length)] + self.__transactions.append(transaction)
+ + +
+[docs] + def save(self, outputFile: str, sep="\t") -> None: + with open(outputFile, 'w') as f: + for tid, transaction in enumerate(self.__transactions): + f.write(f"{tid+1}\t{sep.join(map(str, transaction))}\n")
+
+ + + +if __name__ == "__main__": + obj = generateTemporal(10, 10, 5) + obj.generate() + obj.save("temporal_test.csv") +
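+
+# A hedged sketch of reading the saved temporal file back (file name matches the
+# demo above; each line is "<tid><tab><items...>" with tab separators).
+if __name__ == "__main__":
+    with open("temporal_test.csv") as f:
+        for line in f:
+            tid, *items = line.rstrip("\n").split("\t")
+            print(tid, items)
+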
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/generateTransactional.html b/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/generateTransactional.html new file mode 100644 index 000000000..5d288a2ae --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/generateTransactional.html @@ -0,0 +1,143 @@ + + + + + + PAMI.extras.syntheticDataGenerator.generateTransactional — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.syntheticDataGenerator.generateTransactional

+import random
+
+
+
+[docs]
+class generateTransactional:
+    """
+    Generates a synthetic transactional database; transaction lengths are drawn
+    uniformly from [1, 2 * avgTransactionLength].
+    """
+
+    __transactionSize: int
+    __numOfItems: int
+    __avgTransactionLength: int
+    __transactions: list[list[int]]
+
+    def __init__(self, transactionSize: int, numOfItems: int, avgTransactionLength: int) -> None:
+        self.__transactionSize = transactionSize
+        self.__numOfItems = numOfItems
+        self.__avgTransactionLength = avgTransactionLength
+
+        self.__transactions = list()
+
+[docs] + def generate(self) -> None: + for tid in range(self.__transactionSize): + length = random.randint(1, self.__avgTransactionLength * 2) + transaction = [random.randint(1, self.__numOfItems) + for _ in range(length)] + self.__transactions.append(transaction)
+ + +
+[docs] + def save(self, outputFile: str, sep="\t") -> None: + with open(outputFile, 'w') as f: + for transaction in self.__transactions: + f.write(f"{sep.join(map(str, transaction))}\n")
+
+ + + +if __name__ == "__main__": + obj = generateTransactional(10, 10, 5) + obj.generate() + obj.save("transactional_test.csv") +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/generateUncertainTemporal.html b/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/generateUncertainTemporal.html new file mode 100644 index 000000000..ef0861282 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/generateUncertainTemporal.html @@ -0,0 +1,151 @@ + + + + + + PAMI.extras.syntheticDataGenerator.generateUncertainTemporal — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.syntheticDataGenerator.generateUncertainTemporal

+import random
+
+
+
+[docs]
+class generateUncertainTemporal:
+    """
+    Generates a synthetic uncertain temporal database: every item of a transaction
+    carries an existential probability rounded to `significant` decimal places.
+    """
+
+    __transactionSize: int
+    __numOfItems: int
+    __avgTransactionLength: int
+    __significant: int
+    __transactions: list[list[int]]
+    __probabilities: list[list[float]]
+
+    def __init__(self, transactionSize: int, numOfItems: int, avgTransactionLength: int, significant=2) -> None:
+        self.__transactionSize = transactionSize
+        self.__numOfItems = numOfItems
+        self.__avgTransactionLength = avgTransactionLength
+        self.__significant = significant
+
+        self.__transactions = list()
+        self.__probabilities = list()
+
+[docs]
+    def generate(self) -> None:
+        for tid in range(self.__transactionSize):
+            length = random.randint(1, self.__avgTransactionLength * 2)
+            transaction = [random.randint(1, self.__numOfItems)
+                           for _ in range(length)]
+            self.__transactions.append(transaction)
+            probability = [round(random.uniform(0, 1), self.__significant)
+                           for _ in range(length)]
+            self.__probabilities.append(probability)
+ + +
+[docs]
+    def save(self, outputFile: str, sep="\t") -> None:
+        with open(outputFile, 'w') as f:
+            for tid, (transaction, probability) in enumerate(zip(self.__transactions, self.__probabilities)):
+                f.write(
+                    f"{tid+1}{sep}{sep.join(map(str, transaction))}:{round(sum(probability), self.__significant)}:{sep.join(map(str, probability))}\n")
+
+ + + +if __name__ == "__main__": + obj = generateUncertainTemporal(10, 10, 5) + obj.generate() + obj.save("uncertainTemporal_test.csv") +
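+
+# A hedged sketch of the saved line shape,
+# "<tid><sep><items>:<probability sum>:<probabilities>", and how it can be
+# parsed (the literal below is illustrative).
+if __name__ == "__main__":
+    line = "3\t7\t12:1.5:0.75\t0.75"
+    head, total, probs = line.split(":")
+    tid, *items = head.split("\t")
+    probabilities = [float(p) for p in probs.split("\t")]
+    print(tid, items, total, probabilities)   # 3 ['7', '12'] 1.5 [0.75, 0.75]
+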
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/generateUncertainTransactional.html b/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/generateUncertainTransactional.html new file mode 100644 index 000000000..342e68d09 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/generateUncertainTransactional.html @@ -0,0 +1,151 @@ + + + + + + PAMI.extras.syntheticDataGenerator.generateUncertainTransactional — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.syntheticDataGenerator.generateUncertainTransactional

+import random
+
+
+
+[docs]
+class generateUncertainTransactional:
+    """
+    Generates a synthetic uncertain transactional database: every item of a
+    transaction carries an existential probability rounded to `significant`
+    decimal places.
+    """
+
+    __transactionSize: int
+    __numOfItems: int
+    __avgTransactionLength: int
+    __significant: int
+    __transactions: list[list[int]]
+    __probabilities: list[list[float]]
+
+    def __init__(self, transactionSize: int, numOfItems: int, avgTransactionLength: int, significant=2) -> None:
+        self.__transactionSize = transactionSize
+        self.__numOfItems = numOfItems
+        self.__avgTransactionLength = avgTransactionLength
+        self.__significant = significant
+
+        self.__transactions = list()
+        self.__probabilities = list()
+
+[docs]
+    def generate(self) -> None:
+        for tid in range(self.__transactionSize):
+            length = random.randint(1, self.__avgTransactionLength * 2)
+            transaction = [random.randint(1, self.__numOfItems)
+                           for _ in range(length)]
+            self.__transactions.append(transaction)
+            probability = [round(random.uniform(0, 1), self.__significant)
+                           for _ in range(length)]
+            self.__probabilities.append(probability)
+ + +
+[docs]
+    def save(self, outputFile: str, sep="\t") -> None:
+        with open(outputFile, 'w') as f:
+            for transaction, probability in zip(self.__transactions, self.__probabilities):
+                f.write(
+                    f"{sep.join(map(str, transaction))}:{round(sum(probability), self.__significant)}:{sep.join(map(str, probability))}\n")
+
+ + + +if __name__ == "__main__": + obj = generateUncertainTransactional(10, 10, 5) + obj.generate() + obj.save("uncertainTransactional_test.csv") +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/generateUtilityTemporal.html b/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/generateUtilityTemporal.html new file mode 100644 index 000000000..4bf7d9dbf --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/generateUtilityTemporal.html @@ -0,0 +1,192 @@ + + + + + + PAMI.extras.syntheticDataGenerator.generateUtilityTemporal — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.syntheticDataGenerator.generateUtilityTemporal

+import random
+
+
+
+[docs] +class generateUtilityTemporal: + __transactionSize: int + __numOfItems: int + __avgTransactionLength: int + __minUtilityValue: int + __maxUtilityValue: int + __minNumOfTimesAnItem: int + __maxNumOfTimesAnItem: int + __transactions: list[list[int]] + __internalUtility: dict[str, list[int]] + __externalUtility: list[list[int]] + + def __init__(self, transactionSize: int, numOfItems: int, avgTransactionLength: int, + minUtilityValue: int, maxUtilityValue: int, + minNumOfTimesAnItem: int, maxNumOfTimesAnItem: int) -> None: + self.__transactionSize = transactionSize + self.__numOfItems = numOfItems + self.__avgTransactionLength = avgTransactionLength + self.__minUtilityValue = minUtilityValue + self.__maxUtilityValue = maxUtilityValue + self.__minNumOfTimesAnItem = minNumOfTimesAnItem + self.__maxNumOfTimesAnItem = maxNumOfTimesAnItem + + self.__transactions = list() + self.__internalUtility = dict() + self.__externalUtility = list() + +
+[docs] + def generate(self) -> None: + items = [i+1 for i in range(self.__numOfItems)] + self.__transactions = [random.sample(items, random.randint( + 1, self.__avgTransactionLength*2)) for _ in range(self.__transactionSize)] + self.__generateInternalUtility() + self.__generateExternalUtility()
+ + + def __generateInternalUtility(self) -> None: + items = [i+1 for i in range(self.__numOfItems)] + utilityValues = [random.randint( + self.__minUtilityValue, self.__maxUtilityValue) for i in range(self.__numOfItems)] + self.__internalUtility = { + "items": items, "utilityValues": utilityValues} + + def __generateExternalUtility(self) -> None: + self.__externalUtility = [[random.randint(self.__minNumOfTimesAnItem, self.__maxNumOfTimesAnItem) for _ in range( + len(transaction))] for transaction in self.__transactions] + +
+[docs] + def save(self, outputFile: str, sep="\t", type="utility") -> None: + if (type == "utility"): + with open(outputFile, 'w') as f: + for tid, (transaction, exUtils) in enumerate(zip(self.__transactions, self.__externalUtility)): + f.write(f"{tid}\t{sep.join(map(str, transaction))}:") + utilityValues = [ + eu*self.__internalUtility["utilityValues"][item-1] for item, eu in zip(transaction, exUtils)] + f.write( + f"{sum(utilityValues)}:{sep.join(map(str, utilityValues))}\n") + + elif (type == "internal"): + with open(outputFile, "w") as f: + for item, utility in zip(self.__internalUtility["items"], self.__internalUtility["utilityValues"]): + f.write(f"{item}{sep}{utility}\n") + + elif (type == "external"): + with open(outputFile, "w") as f: + for transaction, exUtils in zip(self.__transactions, self.__externalUtility): + utils = list() + count = 0 + for item in [i+1 for i in range(self.__numOfItems)]: + if item in transaction: + utils.append(exUtils[count]) + count += 1 + else: + utils.append(0) + f.write(f"{sep.join(map(str,utils))}\n")
+
+ + + +if __name__ == "__main__": + obj = generateUtilityTemporal(10, 10, 5, 10, 100, 1, 10) + obj.generate() + obj.save("temporalUtility_test.csv", type="external") +
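+
+# A hedged sketch of how a "utility" line's values arise: each written value is
+# the item's fixed utility value times how many times it occurs in the
+# transaction (the numbers below are illustrative).
+if __name__ == "__main__":
+    itemValues = {3: 40, 7: 10}    # per-item utility values
+    occurrences = {3: 2, 7: 1}     # occurrence counts within one transaction
+    utilities = [occurrences[i] * itemValues[i] for i in (3, 7)]   # [80, 10]
+    print(sum(utilities), utilities)   # 90 [80, 10] -> written as ":90:80\t10"
+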
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/generateUtilityTransactional.html b/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/generateUtilityTransactional.html new file mode 100644 index 000000000..f30958715 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/generateUtilityTransactional.html @@ -0,0 +1,192 @@ + + + + + + PAMI.extras.syntheticDataGenerator.generateUtilityTransactional — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.syntheticDataGenerator.generateUtilityTransactional

+import random
+
+
+
+[docs] +class generateUtilityTransactional: + __transactionSize: int + __numOfItems: int + __avgTransactionLength: int + __minUtilityValue: int + __maxUtilityValue: int + __minNumOfTimesAnItem: int + __maxNumOfTimesAnItem: int + __transactions: list[list[int]] + __internalUtility: dict[str, list[int]] + __externalUtility: list[list[int]] + + def __init__(self, transactionSize: int, numOfItems: int, avgTransactionLength: int, + minUtilityValue: int, maxUtilityValue: int, + minNumOfTimesAnItem: int, maxNumOfTimesAnItem: int) -> None: + self.__transactionSize = transactionSize + self.__numOfItems = numOfItems + self.__avgTransactionLength = avgTransactionLength + self.__minUtilityValue = minUtilityValue + self.__maxUtilityValue = maxUtilityValue + self.__minNumOfTimesAnItem = minNumOfTimesAnItem + self.__maxNumOfTimesAnItem = maxNumOfTimesAnItem + + self.__transactions = list() + self.__internalUtility = dict() + self.__externalUtility = list() + +
+[docs] + def generate(self) -> None: + items = [i+1 for i in range(self.__numOfItems)] + self.__transactions = [random.sample(items, random.randint( + 1, self.__avgTransactionLength*2)) for _ in range(self.__transactionSize)] + self.__generateInternalUtility() + self.__generateExternalUtility()
+ + + def __generateInternalUtility(self) -> None: + items = [i+1 for i in range(self.__numOfItems)] + utilityValues = [random.randint( + self.__minUtilityValue, self.__maxUtilityValue) for i in range(self.__numOfItems)] + self.__internalUtility = { + "items": items, "utilityValues": utilityValues} + + def __generateExternalUtility(self) -> None: + self.__externalUtility = [[random.randint(self.__minNumOfTimesAnItem, self.__maxNumOfTimesAnItem) for _ in range( + len(transaction))] for transaction in self.__transactions] + +
+[docs] + def save(self, outputFile: str, sep="\t", type="utility") -> None: + if (type == "utility"): + with open(outputFile, 'w') as f: + for transaction, exUtils in zip(self.__transactions, self.__externalUtility): + f.write(f"{sep.join(map(str, transaction))}:") + utilityValues = [ + eu*self.__internalUtility["utilityValues"][item-1] for item, eu in zip(transaction, exUtils)] + f.write( + f"{sum(utilityValues)}:{sep.join(map(str, utilityValues))}\n") + + elif (type == "internal"): + with open(outputFile, "w") as f: + for item, utility in zip(self.__internalUtility["items"], self.__internalUtility["utilityValues"]): + f.write(f"{item}{sep}{utility}\n") + + elif (type == "external"): + with open(outputFile, "w") as f: + for transaction, exUtils in zip(self.__transactions, self.__externalUtility): + utils = list() + count = 0 + for item in [i+1 for i in range(self.__numOfItems)]: + if item in transaction: + utils.append(exUtils[count]) + count += 1 + else: + utils.append(0) + f.write(f"{sep.join(map(str,utils))}\n")
+
+ + + +if __name__ == "__main__": + obj = generateUtilityTransactional(10, 10, 5, 10, 100, 1, 10) + obj.generate() + obj.save("transactionalUtility_test.csv", type="external") +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/syntheticUtilityDatabase.html b/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/syntheticUtilityDatabase.html new file mode 100644 index 000000000..db4b22a51 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/syntheticUtilityDatabase.html @@ -0,0 +1,223 @@ + + + + + + PAMI.extras.syntheticDataGenerator.syntheticUtilityDatabase — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.syntheticDataGenerator.syntheticUtilityDatabase

+import random as _rd
+
+
+
+[docs] +class syntheticUtilityDatabase: + """ + This class creates a synthetic utility database. + + Attributes: + totalTransactions (int): Number of transactions. + numOfItems (int): Number of items. + maxUtilRange (int): Maximum utility range. + avgTransactionLength (int): The length of average transaction. + + Methods: + __init__(totalTransactions, numOfItems, maxUtilRange, avgTransactionLength) + Constructor to initialize the database parameters. + createSyntheticUtilityDatabase(outputFile) + Create utility database and store it in the specified output file. + createRandomNumbers(n, targetSum) + Generate a list of random numbers with a specified target sum. + save(outputFile) + Save the generated utility database to a CSV file. + + Credits: + The complete program was written by A.Hemanth sree sai under the supervision of Professor Rage Uday Kiran. + """ + +
+[docs] + def __init__(self, totalTransactions: int, numOfItems: int, maxUtilRange: int, avgTransactionLength: int) -> None: + """ + Constructor to initialize the database parameters. + + Parameters: + totalTransactions (int): Number of transactions. + numOfItems (int): Number of items. + maxUtilRange (int): Maximum utility range. + avgTransactionLength (int): The length of average transaction. + """ + self.totalTransactions = totalTransactions + self.numOfItems = numOfItems + self.maxUtilRange = maxUtilRange + self.avgTransactionLength = avgTransactionLength + self.transactions = []
+ + +
+[docs]
+    def createSyntheticUtilityDatabase(self, outputFile: str) -> None:
+        """
+        Create utility database and store it in the specified output file.
+
+        Parameters:
+            outputFile (str): File name or path to store the database.
+        """
+        if self.avgTransactionLength > self.numOfItems:
+            print("Error: avgTransactionLength cannot exceed numOfItems.")
+            return
+
+        with open(outputFile, 'w') as writer:
+            for _ in range(self.totalTransactions):
+                length = _rd.randint(1, self.avgTransactionLength + 20)
+                items = [_rd.randint(1, self.numOfItems) for _ in range(length)]
+                utilities = [_rd.randint(1, self.maxUtilRange) for _ in range(length)]
+
+                # write one utility value per item so the line is internally consistent
+                st = '\t'.join(map(str, items)) + '\t:' + str(sum(utilities)) + ':'
+                st1 = '\t'.join(map(str, utilities)) + '\t'
+
+                # keep the transaction in memory so save() has something to write
+                self.transactions.append(items)
+                writer.write(f"{st}{st1}\n")
+ + +
+[docs] + def createRandomNumbers(self, n: int, targetSum: int) -> list[float]: + """ + Generate a list of random numbers with a specified target sum. + + Parameters: + n (int): Number of random numbers to generate. + targetSum (int): Target sum for the generated random numbers. + + Returns: + list: List of generated random numbers normalized and multiplied by the target sum. + """ + randNumbers = [_rd.uniform(0, 1) for _ in range(n)] + randSum = sum(randNumbers) + normalizedNumbers = [num / randSum for num in randNumbers] + result = [round(num * targetSum) for num in normalizedNumbers] + return result
+ + +
+[docs] + def save(self, outputFile: str) -> None: + """ + Save the generated utility database to a CSV file. + + Parameters: + outputFile (str): File name or path to store the CSV file. + """ + with open(outputFile, 'w') as f: + for transaction in self.transactions: + f.write('\t'.join(map(str, transaction)) + '\n')
+
+
+
+
+if __name__ == "__main__":
+    ap = syntheticUtilityDatabase(100000, 870, 100, 10)
+    ap.createSyntheticUtilityDatabase("T10_util-12.csv")
+
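+
+# A hedged worked example of createRandomNumbers: n uniform draws are normalized
+# to sum to 1 and then scaled to targetSum; because every value is rounded, the
+# final sum only approximates targetSum (the draws below are illustrative).
+if __name__ == "__main__":
+    raws = [0.2, 0.3, 0.5]                       # pretend _rd.uniform() draws
+    weights = [r / sum(raws) for r in raws]      # [0.2, 0.3, 0.5]
+    scaled = [round(w * 2000) for w in weights]  # [400, 600, 1000]
+    print(scaled, sum(scaled))                   # sums to 2000 here; rounding can drift
+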
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/temporalDatabaseGen.html b/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/temporalDatabaseGen.html new file mode 100644 index 000000000..972e357d0 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/temporalDatabaseGen.html @@ -0,0 +1,195 @@ + + + + + + PAMI.extras.syntheticDataGenerator.temporalDatabaseGen — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.syntheticDataGenerator.temporalDatabaseGen

+import random as _rd
+
+
+
+[docs] +class CreateSyntheticTemporal: + """ + This class creates a synthetic temporal database. + + Attributes: + total_transactions (int): Number of transactions. + num_of_items (int): Number of items. + avg_transaction_length (int): The length of average transaction. + + Methods: + create_temporal_database(output_file) + Create temporal database and store it in the specified output file. + + Credits: + The complete program was written by A.Hemanth sree sai under the supervision of Professor Rage Uday Kiran. + """ + + def __init__(self, total_transactions: int, num_of_items: int, avg_transaction_length: int) -> None: + """ + Constructor to initialize the database parameters. + + Parameters: + total_transactions (int): Number of transactions. + num_of_items (int): Number of items. + avg_transaction_length (int): The length of average transaction. + """ + self.total_transactions = total_transactions + self.num_of_items = num_of_items + self.avg_transaction_length = avg_transaction_length + +
+[docs]
+    def create_temporal_database(self, output_file: str) -> None:
+        """
+        Create temporal database and store it in the specified output file.
+
+        Parameters:
+            output_file (str): File name or path to store the database.
+        """
+        if self.avg_transaction_length > self.num_of_items:
+            print("Error: avg_transaction_length cannot exceed num_of_items.")
+            return
+
+        count = 1
+        with open(output_file, 'w') as writer:
+            for _ in range(self.total_transactions):
+                length = _rd.randint(1, self.avg_transaction_length + 20)
+                st = str(count) + '\t'
+                for _ in range(length):
+                    item = _rd.randint(1, self.num_of_items)
+                    st = st + str(item) + '\t'
+                writer.write("%s \n" % st)
+                count += 1
+ + +
+[docs] + def generate_random_numbers(self, n: int, target_sum: int) -> list[float]: + """ + Generate a list of random numbers with a specified target sum. + + Parameters: + n (int): Number of random numbers to generate. + target_sum (int): Target sum for the generated random numbers. + + Returns: + list: List of generated random numbers normalized and multiplied by the target sum. + """ + rand_numbers = [_rd.uniform(0, 1) for _ in range(n)] + rand_sum = sum(rand_numbers) + normalized_numbers = [num / rand_sum for num in rand_numbers] + result = [round(num * target_sum) for num in normalized_numbers] + return result
+
+
+
+
+if __name__ == "__main__":
+    ap = CreateSyntheticTemporal(100000, 870, 10)
+    ap.create_temporal_database("temporal_T10.csv")
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/utilityDatabase.html b/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/utilityDatabase.html new file mode 100644 index 000000000..a70fd1a30 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/syntheticDataGenerator/utilityDatabase.html @@ -0,0 +1,282 @@ + + + + + + PAMI.extras.syntheticDataGenerator.utilityDatabase — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.syntheticDataGenerator.utilityDatabase

+import numpy as np
+import pandas as pd
+import random
+
+
+
+[docs] +class UtilityDataGenerator: + def __init__(self, databaseSize, numberOfItems, averageLengthOfTransaction, + minimumInternalUtilityValue, maximumInternalUtilityValue, + minimumExternalUtilityValue, maximumExternalUtilityValue): + self.databaseSize = databaseSize + self.numberOfItems = numberOfItems + self.averageLengthOfTransaction = averageLengthOfTransaction + self.minInternalUtilityValue = minimumInternalUtilityValue + self.maxInternalUtilityValue = maximumInternalUtilityValue + self.minExternalUtilityValue = minimumExternalUtilityValue + self.maxExternalUtilityValue = maximumExternalUtilityValue + self.entries = [] + self.ExternalUtilityData = self.GenerateExternalUtilityData() + +
+[docs] + def GenerateExternalUtilityData(self): + items = range(1, self.numberOfItems + 1) + ExternalUtilityData = {f'item{item}': random.randint(100, 900) for item in items} + return ExternalUtilityData
+ + +
+[docs]
+    def Generate(self):
+        for _ in range(self.databaseSize):
+            entry = np.random.randint(self.minInternalUtilityValue, self.maxInternalUtilityValue + 1,
+                                      size=self.numberOfItems)
+            entry_sum = entry.sum()
+            self.entries.append((entry, entry_sum))
+ + +
+[docs] + def Save(self, fileName): + with open(fileName, 'w') as file: + for idx, (entry, entry_sum) in enumerate(self.entries, start=1): + entry_str = '\t'.join(map(str, entry)) + file.write(f'{idx}\t{entry_str}\t{entry_sum}\n')
+ + +
+[docs] + def SaveItemsInternalUtilityValues(self, fileName): + items = random.sample(range(1, self.numberOfItems + 1), self.numberOfItems) + internal_utility_data = [np.random.randint(self.minInternalUtilityValue, self.maxInternalUtilityValue + 1) for _ + in items] + data = {'Item': items, 'Internal Utility Value': internal_utility_data} + df = pd.DataFrame(data) + df.to_csv(fileName, sep='\t', index=False)
+ + +
+[docs] + def Saveitemsexternalutilityvalues(self, fileName): + items = random.sample(range(1, self.numberOfItems + 1), self.numberOfItems) + data = {'Item': [f'item{item}' for item in items], + 'External Utility Value': list(self.ExternalUtilityData.values())} + df = pd.DataFrame(data) + df.to_csv(fileName, sep='\t', index=False)
+ + +
+[docs] + def GetUtilityData(self): + data = {'Entry ID': range(1, len(self.entries) + 1), + 'Entries': [entry for entry, _ in self.entries], + 'Sum': [entry_sum for _, entry_sum in self.entries]} + df = pd.DataFrame(data) + return df
+ + +
+[docs] + def GetInternalUtilityData(self): + items = random.sample(range(1, self.numberOfItems + 1), self.numberOfItems) + InternalUtilityData = [np.random.randint(self.minInternalUtilityValue, self.maxInternalUtilityValue + 1) for _ + in items] + data = {'Item': items, 'Internal Utility Value': InternalUtilityData} + df = pd.DataFrame(data) + return df
+ + +
+[docs] + def GetExternalUtilityData(self): + items = random.sample(range(1, self.numberOfItems + 1), self.numberOfItems) + data = {'Item': [f'item{item}' for item in items], + 'External Utility Value': list(self.ExternalUtilityData.values())} + df = pd.DataFrame(data) + return df
+ + +
+[docs] + def GenerateAndPrintItemPairs(self): + items = random.sample(range(1, self.numberOfItems + 1), 2) + item1_id = f'item{items[0]}' + item2_id = f'item{items[1]}' + item1_value = self.ExternalUtilityData[item1_id] + item2_value = self.ExternalUtilityData[item2_id] + sum_values = item1_value + item2_value + print(f"{item1_id} value: {item1_value}\t{item2_id} value: {item2_value}\tSum of values: {sum_values}") + + # Separate the sum with ' : ' + print(f"{item1_value}:{item2_value}:{sum_values}")
+
+ + + +if __name__ == "__main__": + data_generator = UtilityDataGenerator(100000, 2000, 10, 1, 100, 1, 10) + data_generator.Generate() + data_generator.Save("utility_data-6.csv") + data_generator.SaveItemsInternalUtilityValues("items_internal_utility.csv") + data_generator.Saveitemsexternalutilityvalues("items_external_utility.csv") + utility_data = data_generator.GetUtilityData() + InternalUtilityData = data_generator.GetInternalUtilityData() + ExternalUtilityData = data_generator.GetExternalUtilityData() + + for _ in range(10): # Print pairs for demonstration, adjust the range as needed + data_generator.GenerateAndPrintItemPairs() + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/topKPatterns.html b/sphinx/_build/html/_modules/PAMI/extras/topKPatterns.html new file mode 100644 index 000000000..c971054fd --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/topKPatterns.html @@ -0,0 +1,204 @@ + + + + + + PAMI.extras.topKPatterns — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for PAMI.extras.topKPatterns

+# topKPatterns is used to find the top-k longest patterns in an input file.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#     from PAMI.extras import topKPatterns as tK
+#
+#     obj = tK.topKPatterns("inputFile", 10, "\t")
+#
+#     obj.save("outputFile")
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+
+
+[docs]
+class topKPatterns:
+    """
+    :Description: find the top-k longest patterns in an input file.
+
+    :Attributes:
+
+        inputFile : str
+            input file name or path
+        k : int
+            number of patterns to retrieve. default is 10
+        sep : str
+            separator that separates items in the input file. default is tab space
+
+    :Methods:
+
+        getTopKPatterns()
+            return the top-k patterns as a dict
+        save(outputFile)
+            store the top-k patterns into the output file.
+
+    **Importing this algorithm into a python program**
+    --------------------------------------------------------
+    .. code-block:: python
+
+        from PAMI.extras import topKPatterns as tK
+
+        obj = tK.topKPatterns("inputFile", 10, "\t")
+
+        obj.save("outputFile")
+    """
+    def __init__(self, inputFile: str, k: int=10, sep: str='\t') -> None:
+        self.inputFile = inputFile
+        self.k = k
+        self.sep = sep
+
+[docs]
+    def getTopKPatterns(self) -> dict:
+        """
+        Get the top-k longest patterns. The user can define the k value.
+
+        :return: top-k patterns as a dictionary. top k patterns = {patternId: pattern}
+        """
+        with open(self.inputFile, 'r') as f:
+            patterns = [line.strip().split(':')[0].split(self.sep)[:-1] for line in f]
+            patterns = sorted(patterns, key=lambda x: len(x), reverse=True)
+            return {patternId: patterns[patternId - 1] for patternId in range(1, int(self.k) + 1)}
+ + +
+[docs]
+    def save(self, outputFile: str) -> None:
+        """
+        Store the top-k longest patterns into a file. The user can define the k value.
+
+        :param outputFile: output file name or path
+        :type outputFile: str
+        """
+        with open(self.inputFile, 'r') as f:
+            patterns = [line.strip().split(':')[0].split(self.sep)[:-1] for line in f]
+            patterns = sorted(patterns, key=lambda x: len(x), reverse=True)
+        with open(outputFile, 'w') as f:
+            patternId = 1
+            for pattern in patterns[:self.k]:
+                for item in pattern:
+                    f.write(f'{patternId}\t{item}\n')
+                patternId += 1
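+
+# A hedged sketch of the input format getTopKPatterns expects: items separated
+# by sep, with the support after a ':' (the line below is illustrative).
+if __name__ == "__main__":
+    line = "a\tb\tc\t:12"
+    pattern = line.strip().split(':')[0].split('\t')[:-1]
+    print(pattern)   # ['a', 'b', 'c']
+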
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/uncertaindb_convert.html b/sphinx/_build/html/_modules/PAMI/extras/uncertaindb_convert.html new file mode 100644 index 000000000..fed3f5be7 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/uncertaindb_convert.html @@ -0,0 +1,172 @@ + + + + + + PAMI.extras.uncertaindb_convert — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.extras.uncertaindb_convert

+# uncertaindb_convert is used to convert predicted class labels and their prediction probabilities into an uncertain transaction.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#     from PAMI.extras import uncertaindb_convert as un
+#
+#     obj = un.predictedClass2Transaction(predicted_classes, 0.8)
+#
+#     obj.getBinaryTransaction(predicted_classes, 0.8)
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+
+
+[docs]
+class predictedClass2Transaction:
+    """
+    :Description: This class converts predicted class labels (with their prediction probabilities) into an uncertain transaction.
+
+    :param predicted_classes: list : (name, probability, boundingBox) triples, e.g. object-detector output
+
+    :param minThreshold: int or float : minimum threshold. User defined value.
+
+
+    **Importing this algorithm into a python program**
+    --------------------------------------------------------
+    .. code-block:: python
+
+        from PAMI.extras import uncertaindb_convert as un
+
+        obj = un.predictedClass2Transaction(predicted_classes, 0.8)
+
+        binaryTransaction = obj.getBinaryTransaction(predicted_classes, 0.8)
+
+    """
+    def __init__(self, predicted_classes: list, minThreshold: float = 0.8):
+        self.predicted_classes = predicted_classes
+        self.minThreshold = minThreshold
+[docs]
+    def getBinaryTransaction(self, predicted_classes: list, minThreshold: float = 0.8) -> dict:
+        """
+        Collect, per class name, the prediction probabilities that exceed minThreshold.
+
+        :return: {className: [probabilities above minThreshold]}
+        """
+        self.predictions_dict = {}
+        for name, p, box in predicted_classes:
+            if p > minThreshold:
+                if name not in self.predictions_dict:
+                    self.predictions_dict[name] = [p]
+                else:
+                    self.predictions_dict[name].append(p)
+        return self.predictions_dict
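+
+# A hedged usage sketch: getBinaryTransaction expects (name, probability, box)
+# triples, e.g. object-detector output; the triples below are illustrative.
+if __name__ == "__main__":
+    detections = [("car", 0.92, (0, 0, 10, 10)),
+                  ("car", 0.85, (5, 5, 9, 9)),
+                  ("dog", 0.40, (1, 1, 4, 4))]
+    conv = predictedClass2Transaction(detections, 0.8)
+    print(conv.getBinaryTransaction(detections, 0.8))   # {'car': [0.92, 0.85]}
+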
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/extras/visualize/graphs.html b/sphinx/_build/html/_modules/PAMI/extras/visualize/graphs.html new file mode 100644 index 000000000..b480bd602 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/extras/visualize/graphs.html @@ -0,0 +1,159 @@ + + + + + + PAMI.extras.visualize.graphs — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for PAMI.extras.visualize.graphs

+
+import networkx as nx
+import matplotlib.pyplot as plt
+
+
+[docs]
+class graphDatabase:
+    """
+    Reads a graph database file and plots every graph it contains with networkx.
+    """
+
+    def __init__(self, iFile) -> None:
+        self.iFile = iFile
+
+[docs] + def plot(self): + with open(self.iFile, 'r') as file: + lines = file.readlines() + + current_graph = None + graphs = [] + vertex_labels = {} + edge_labels = {} + + for line in lines: + if line.startswith('t #'): + if current_graph is not None: + graphs.append((current_graph, vertex_labels, edge_labels)) + current_graph = nx.Graph() + vertex_labels = {} + edge_labels = {} + elif line.startswith('v'): + _, vertex_id, label = line.split() + current_graph.add_node(int(vertex_id)) + vertex_labels[int(vertex_id)] = label + elif line.startswith('e'): + _, source, target, label = line.split() + current_graph.add_edge(int(source), int(target)) + edge_labels[(int(source), int(target))] = label + + if current_graph is not None: + graphs.append((current_graph, vertex_labels, edge_labels)) + + n_rows = int(len(graphs) ** 0.5) + n_cols = (len(graphs) // n_rows) + (len(graphs) % n_rows > 0) + + plt.figure(figsize=(n_cols * 4, n_rows * 4)) + + for i, (graph, vertex_labels, edge_labels) in enumerate(graphs): + ax = plt.subplot(n_rows, n_cols, i + 1) + pos = nx.spring_layout(graph) + nx.draw(graph, pos, labels=vertex_labels, ax=ax, with_labels=True, node_color='lightblue', + node_size=500, font_size=10, font_weight='bold') + nx.draw_networkx_edge_labels(graph, pos, edge_labels=edge_labels, ax=ax, font_color='black') + ax.set_title(f"Frequent Subgraph {i + 1}") + + plt.tight_layout() + plt.show()
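+
+# A hedged sketch of the input format plot() parses: 't # <id>' starts a graph,
+# 'v <id> <label>' adds a vertex, 'e <src> <dst> <label>' adds an edge
+# (the file contents and name below are illustrative).
+if __name__ == "__main__":
+    with open("graphs_demo.txt", "w") as f:
+        f.write("t # 0\nv 0 A\nv 1 B\ne 0 1 x\n")
+    graphDatabase("graphs_demo.txt").plot()
+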
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/faultTolerantFrequentPattern/basic/FTApriori.html b/sphinx/_build/html/_modules/PAMI/faultTolerantFrequentPattern/basic/FTApriori.html new file mode 100644 index 000000000..87115c6fa --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/faultTolerantFrequentPattern/basic/FTApriori.html @@ -0,0 +1,579 @@ + + + + + + PAMI.faultTolerantFrequentPattern.basic.FTApriori — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.faultTolerantFrequentPattern.basic.FTApriori

+# FTApriori is one of the fundamental algorithms to discover fault-tolerant frequent patterns in a transactional database.
+#
+# **Importing this algorithm into a python program**
+# ----------------------------------------------------------------
+#
+#
+#             from PAMI.faultTolerantFrequentPattern.basic import FTApriori as alg
+#
+#             obj = alg.FTApriori(inputFile,minSup,itemSup,minLength,faultTolerance)
+#
+#             obj.mine()
+#
+#             patterns = obj.getPatterns()
+#
+#             print("Total number of fault-tolerant frequent patterns:", len(patterns))
+#
+#             obj.save("outputFile")
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+     Copyright (C)  2021 Rage Uday Kiran
+     
+"""
+
+from PAMI.faultTolerantFrequentPattern.basic import abstract as _ab
+import pandas as pd
+from typing import List, Dict, Tuple, Set, Union, Any, Generator
+from deprecated import deprecated
+
+
+
+[docs]
+class FTApriori(_ab._faultTolerantFrequentPatterns):
+    """
+
+    :Description: FT-Apriori is one of the fundamental algorithms to discover fault-tolerant frequent patterns in a transactional database.
+                  This program employs the apriori property (or downward closure property) to reduce the search space effectively.
+
+    :Reference: Pei, Jian & Tung, Anthony & Han, Jiawei. (2001). Fault-Tolerant Frequent Pattern Mining: Problems and Challenges.
+
+    :param  iFile: str :
+                   Name of the input file to mine the complete set of fault-tolerant frequent patterns
+    :param  oFile: str :
+                   Name of the output file to store the complete set of fault-tolerant frequent patterns
+    :param  minSup: float or int or str :
+                   The user can specify minSup either in count or proportion of database size.
+                   If the program detects the data type of minSup is integer, then it treats minSup as expressed in count.
+                   Otherwise, it will be treated as float.
+                   Example: minSup=10 will be treated as integer, while minSup=10.0 will be treated as float
+    :param  itemSup: int or float :
+                   Minimum frequency of an item
+    :param  minLength: int :
+                   Minimum length of a pattern
+    :param  faultTolerance: int :
+                   Maximum number of items of a pattern that may be absent from a supporting transaction
+    :param  sep: str :
+                   This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator.
+
+
+    :Attributes:
+
+        startTime : float
+            To record the start time of the mining process
+
+        endTime : float
+            To record the completion time of the mining process
+
+        finalPatterns : dict
+            Storing the complete set of patterns in a dictionary variable
+
+        memoryUSS : float
+            To store the total amount of USS memory consumed by the program
+
+        memoryRSS : float
+            To store the total amount of RSS memory consumed by the program
+
+        Database : list
+            To store the transactions of a database in list
+
+
+    **Methods to execute code on terminal**
+    ------------------------------------------
+
+    .. code-block:: console
+
+      Format:
+
+      (.venv) $ python3 FTApriori.py <inputFile> <outputFile> <minSup> <itemSup> <minLength> <faultTolerance>
+
+      Example Usage:
+
+      (.venv) $ python3 FTApriori.py sampleDB.txt patterns.txt 10.0 3.0 3 1
+
+
+    .. note:: minSup is interpreted as a transaction count when given as an integer and as a proportion of the database size when given as a float.
+
+    **Importing this algorithm into a python program**
+    ----------------------------------------------------------------
+    .. code-block:: python
+
+            from PAMI.faultTolerantFrequentPattern.basic import FTApriori as alg
+
+            obj = alg.FTApriori(inputFile, minSup, itemSup, minLength, faultTolerance)
+
+            obj.mine()
+
+            patterns = obj.getPatterns()
+
+            print("Total number of fault-tolerant frequent patterns:", len(patterns))
+
+            obj.save("outputFile")
+
+            memUSS = obj.getMemoryUSS()
+
+            print("Total Memory in USS:", memUSS)
+
+            memRSS = obj.getMemoryRSS()
+
+            print("Total Memory in RSS", memRSS)
+
+            run = obj.getRuntime()
+
+            print("Total ExecutionTime in seconds:", run)
+
+    **Credits:**
+    ----------------
+    The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.
+ + """ + + _minSup = float() + _itemSup = float() + _minLength = int() + _faultTolerance = int() + _startTime = float() + _endTime = float() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _sep = " " + _memoryUSS = float() + _memoryRSS = float() + _Database = [] + _mapSupport = {} + + def _creatingItemSets(self) -> None: + """ + Storing the complete transactions of the database/input file in a database variable + """ + self._Database = [] + if isinstance(self._iFile, _ab._pd.DataFrame): + temp = [] + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'Transactions' in i: + temp = self._iFile['Transactions'].tolist() + + for k in temp: + self._Database.append(set(k)) + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(set(temp)) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line.strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(set(temp)) + except IOError: + print("File Not Found") + quit() + + def _convert(self, value) -> float: + """ + To convert the user specified minSup value + + :param value: user specified minSup value + + :type value: int or float + + :return: converted type + + :rtype: float + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self._Database) * value) + if type(value) is str: + if '.' in value: + value = float(value) + value = (len(self._Database) * value) + else: + value = int(value) + return value + + def _Count(self, k) -> Tuple[int, List[List[str]]]: + """ + :param k: list of items + + :type k: list + + :return: count of items in k and list + + :rtype: tuple + + """ + count = 0 + items = [] + k = list(k) + n = len(k) - self._faultTolerance + c = _ab._itertools.combinations(k, n) + count = 0 + for j in c: + j = list(j) + for i in self._Database: + if set(j).issubset(i): + count += 1 + items.append(i) + items = list(set(map(tuple, items))) + return len(items), items + + def _oneLengthFrequentItems(self) -> None: + self._mapSupport = {} + for li in self._Database: + for i in li: + if i not in self._mapSupport: + self._mapSupport[i] = 1 + else: + self._mapSupport[i] += 1 + self._mapSupport = {k: v for k, v in self._mapSupport.items() if v >= self._itemSup} + + def _countItemSupport(self, itemset) -> int: + """ + This function is used to count the itemSupport + + :param itemSet: frequent itemSet that generated + + :type itemset: list + + :return: count of items + + :rtype: int + + """ + tids = {} + res = True + count = 0 + for x in self._Database: + if abs(len(itemset) - len(set(x) & set(itemset))) <= self._faultTolerance: + count += 1 + return count + + def _getFaultPatterns(self) -> None: + l = [k for k, v in self._mapSupport.items()] + for i in range(0, len(l) + 1): + c = _ab._itertools.combinations(l, i) + for j in c: + res = self._countItemSupport(j) + if len(j) >= self._minLength and res >= self._minSup: + self._finalPatterns[tuple(j)] = res + +
+[docs] + @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.") + def startMine(self) -> None: + """ + Fault-tolerant frequent pattern mining process will start from here + """ + + self.mine()
+ + +
+[docs] + def mine(self) -> None: + """ + Fault-tolerant frequent pattern mining process will start from here + """ + self._Database = [] + self._startTime = _ab._time.time() + self._creatingItemSets() + self._minSup = self._convert(self._minSup) + self._itemSup = self._convert(self._itemSup) + self._minLength = int(self._minLength) + self._faultTolerance = int(self._faultTolerance) + self._oneLengthFrequentItems() + + self._getFaultPatterns() + self._endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = float() + self._memoryRSS = float() + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss + print("Fault-Tolerant Frequent patterns were generated successfully using FTApriori algorithm ")
+ + +
+[docs] + def getMemoryUSS(self) -> float: + """ + + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + + :rtype: float + + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self) -> float: + """ + + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + + :rtype: float + + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self) -> float: + """ + + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + + :rtype: float + + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self) -> pd.DataFrame: + """ + + Storing final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + + :rtype: pd.DataFrame + + """ + + dataFrame = {} + data = [] + for a, b in self._finalPatterns.items(): + s = str() + for i in a: + s = s + i + ' ' + data.append([s, b]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support']) + return dataFrame
+ + +
+
+[docs]
+    def save(self, outFile) -> None:
+        """
+
+        Complete set of frequent patterns will be written to an output file
+
+        :param outFile: name of the output file
+
+        :type outFile: csvfile
+
+        :return: None
+
+        """
+        self._oFile = outFile
+        with open(self._oFile, 'w+') as writer:
+            for x, y in self._finalPatterns.items():
+                s = str()
+                for i in x:
+                    s = s + i + '\t'
+                s1 = s.strip() + ":" + str(y)
+                writer.write("%s \n" % s1)
+ + +
+[docs] + def getPatterns(self) -> Dict[Tuple[str, ...], int]: + """ + + Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + + :rtype: dict + + """ + return self._finalPatterns
+ + +
+
+[docs]
+    def printResults(self) -> None:
+        """
+        This function is used to print the results
+        """
+        print("Total number of Fault-Tolerant Frequent Patterns:", len(self.getPatterns()))
+        print("Total Memory in USS:", self.getMemoryUSS())
+        print("Total Memory in RSS:", self.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_ab._sys.argv) == 7 or len(_ab._sys.argv) == 8:
+        if len(_ab._sys.argv) == 8:
+            _ap = FTApriori(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4],
+                            _ab._sys.argv[5], _ab._sys.argv[6], _ab._sys.argv[7])
+        if len(_ab._sys.argv) == 7:
+            _ap = FTApriori(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5], _ab._sys.argv[6])
+        _ap.mine()
+        print("Total number of Frequent Patterns:", len(_ap.getPatterns()))
+        _ap.save(_ab._sys.argv[2])
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS:", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        print("Error! The number of input parameters does not match the total number of parameters required")
+
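To make the fault-tolerance criterion concrete, the sketch below (illustrative only; the function name and data are made up and independent of the class above) counts a transaction toward an itemset's support whenever the transaction misses at most faultTolerance of the itemset's items, mirroring _countItemSupport:

.. code-block:: python

    # Illustrative sketch of fault-tolerant support counting
    def faultTolerantSupport(database, itemset, faultTolerance):
        itemset = set(itemset)
        count = 0
        for transaction in database:
            # number of itemset members missing from this transaction
            missing = len(itemset) - len(itemset & set(transaction))
            if missing <= faultTolerance:
                count += 1
        return count

    database = [{'a', 'b', 'c'}, {'a', 'b'}, {'b', 'c'}, {'a', 'c', 'd'}]
    # every transaction misses at most one of {'a', 'b', 'c'}
    print(faultTolerantSupport(database, {'a', 'b', 'c'}, faultTolerance=1))  # 4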
\ No newline at end of file
diff --git a/sphinx/_build/html/_modules/PAMI/faultTolerantFrequentPattern/basic/FTFPGrowth.html b/sphinx/_build/html/_modules/PAMI/faultTolerantFrequentPattern/basic/FTFPGrowth.html
new file mode 100644
index 000000000..426d2bd99
--- /dev/null
+++ b/sphinx/_build/html/_modules/PAMI/faultTolerantFrequentPattern/basic/FTFPGrowth.html
@@ -0,0 +1,830 @@
+PAMI.faultTolerantFrequentPattern.basic.FTFPGrowth — PAMI 2024.04.23 documentation
Source code for PAMI.faultTolerantFrequentPattern.basic.FTFPGrowth

+# FTFPGrowth algorithm aims to discover all fault-tolerant frequent patterns that may exist in a transactional database.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------
+#
+#
+#             from PAMI.faultTolerantFrequentPattern.basic import FTFPGrowth as alg
+#
+#             obj = alg.FTFPGrowth(inputFile,minSup,itemSup,minLength,faultTolerance)
+#
+#             obj.mine()
+#
+#             faultTolerantFrequentPatterns = obj.getPatterns()
+#
+#             print("Total number of fault-tolerant frequent patterns:", len(faultTolerantFrequentPatterns))
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+
+
+from PAMI.faultTolerantFrequentPattern.basic import abstract as _fp
+from typing import List, Dict, Tuple, Set, Union, Any, Generator
+import pandas as pd
+from deprecated import deprecated
+
+_minSup = str()
+_fp._sys.setrecursionlimit(20000)
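+# Note: generatePatterns() recurses once per conditional tree, so a deep fp-tree
+# can exceed Python's default recursion limit; hence the raised limit above.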
+
+
+class _Node:
+    """
+    A class used to represent the node of frequentPatternTree
+
+    :Attributes:
+
+        itemId: int
+            storing item of a node
+        freq: int
+            To maintain the support count of the node
+        parent: node
+            To maintain the parent of node
+        children: list
+            To maintain the children of node
+
+    :Methods:
+
+        addChild(node)
+            Updates the node's children list and the parent pointer of the given node
+    """
+
+    def __init__(self, item: int, children: Dict[int, '_Node']) -> None:
+        self.itemId = item
+        self.freq = 1  # support count of the node; maintained as transactions are added
+        self.parent = None
+        self.children = children
+
+    def addChild(self, node: '_Node') -> None:
+        """
+        Adds the given node as a child of this node and updates its parent link
+
+        :param node: Child Node
+
+        :type node: _Node
+
+        :return: None
+
+        """
+        self.children[node.itemId] = node
+        node.parent = self
+
+
+class _Tree:
+    """
+    A class used to represent the frequentPatternGrowth tree structure
+
+    :Attributes:
+
+        root : Node
+            The first node of the tree set to Null.
+        summaries : dictionary
+            Maps each itemId to the list of nodes that share that itemId
+        info : dictionary
+            frequency of items in the transactions
+
+    :Methods:
+
+        addTransaction(transaction, freq)
+            adds the items of a transaction into the tree as nodes; freq is the transaction count
+        getFinalConditionalPatterns(node)
+            getting the conditional patterns from fp-tree for a node
+        getConditionalTransactions(conditionalPatterns, conditionalFreq)
+            prunes the items below minSup from the conditional patterns and re-sorts them
+        generatePatterns(prefix)
+            generating the patterns from fp-tree
+    """
+
+    def __init__(self) -> None:
+        self.headerList = []
+        self.root = _Node(None, {})
+        self.summaries = {}
+        self.info = {}
+
+    def addTransaction(self, transaction: List[int], count: int) -> None:
+        """
+        Adding transaction into tree
+
+        :param transaction: it represents the one transaction in database
+
+        :type transaction: list
+
+        :param count: frequency of item
+
+        :type count: int
+
+        :return: None
+
+        """
+
+        # This method takes transaction as input and returns the tree
+        currentNode = self.root
+        for i in range(len(transaction)):
+            if transaction[i] not in currentNode.children:
+                newNode = _Node(transaction[i], {})
+                newNode.freq = count
+                currentNode.addChild(newNode)
+                if transaction[i] in self.summaries:
+                    self.summaries[transaction[i]].append(newNode)
+                else:
+                    self.summaries[transaction[i]] = [newNode]
+                currentNode = newNode
+            else:
+                currentNode = currentNode.children[transaction[i]]
+                currentNode.freq += count
+
+    def getFinalConditionalPatterns(self, alpha: int) -> Tuple[List[List[int]], List[int], Dict[int, int]]:
+        """
+        Generates the conditional patterns for a node
+
+        :param alpha: node to generate conditional patterns
+
+        :type alpha: int
+
+        :return: conditional patterns and the frequency of each item in them
+
+        :rtype: Tuple[List, List, Dict]
+
+        """
+        finalPatterns = []
+        finalFreq = []
+        for i in self.summaries[alpha]:
+            set1 = i.freq
+            set2 = []
+            while i.parent.itemId is not None:
+                set2.append(i.parent.itemId)
+                i = i.parent
+            if len(set2) > 0:
+                set2.reverse()
+                finalPatterns.append(set2)
+                finalFreq.append(set1)
+        finalPatterns, finalFreq, info = self.getConditionalTransactions(finalPatterns, finalFreq)
+        return finalPatterns, finalFreq, info
+
+    @staticmethod
+    def getConditionalTransactions(ConditionalPatterns: List[List[int]], conditionalFreq: List[int]) -> Tuple[List[List[int]], List[int], Dict[int, int]]:
+        """
+        To calculate the frequency of items in conditional patterns and sorting the patterns
+
+        :Parameters:
+            ConditionalPatterns: paths of a node
+            conditionalFreq: frequency of each item in the path
+
+        :type ConditionalPatterns: List
+
+        :type conditionalFreq: List
+
+        :return: conditional patterns and frequency of each item in transactions
+
+        :rtype: Tuple[List, List, Dict]
+
+        """
+        global _minSup
+        pat = []
+        freq = []
+        data1 = {}
+        for i in range(len(ConditionalPatterns)):
+            for j in ConditionalPatterns[i]:
+                if j in data1:
+                    data1[j] += conditionalFreq[i]
+                else:
+                    data1[j] = conditionalFreq[i]
+        up_dict = {k: v for k, v in data1.items() if v >= _minSup}
+        count = 0
+        for p in ConditionalPatterns:
+            p1 = [v for v in p if v in up_dict]
+            trans = sorted(p1, key=lambda x: (up_dict.get(x), -x), reverse=True)
+            if len(trans) > 0:
+                pat.append(trans)
+                freq.append(conditionalFreq[count])
+            count += 1
+        return pat, freq, up_dict
+
+    def generatePatterns(self, prefix: List[int]) -> Generator[Tuple[List[int], int], None, None]:
+        """
+        To generate the frequent patterns
+
+        :parameters:
+            prefix: the current pattern prefix (an empty list on the first call)
+
+        :type prefix: List
+
+        :return: frequent patterns extracted from the fp-tree
+
+        :rtype: Generator[Tuple[List[int], int], None, None]
+        """
+        for i in sorted(self.summaries, key=lambda x: (self.info.get(x), -x)):
+            pattern = prefix[:]
+            pattern.append(i)
+            yield pattern, self.info[i]
+            patterns, freq, info = self.getFinalConditionalPatterns(i)
+            conditionalTree = _Tree()
+            conditionalTree.info = info.copy()
+            for pat in range(len(patterns)):
+                conditionalTree.addTransaction(patterns[pat], freq[pat])
+            if len(patterns) > 0:
+                for q in conditionalTree.generatePatterns(pattern):
+                    yield q
+
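Before the mining class itself, here is a small illustrative exercise of the helper classes above (the transactions and thresholds are made up; items are assumed to be already rank-encoded integers, which is what mine() produces via __updateTransactions):

.. code-block:: python

    # Illustrative only: driving _Tree directly with rank-encoded transactions
    _minSup = 2                       # module-level threshold read during pruning

    tree = _Tree()
    tree.info = {0: 3, 1: 2, 2: 2}    # support of each rank-encoded item
    for transaction in [[0, 1], [0, 2], [0, 1, 2]]:
        tree.addTransaction(transaction, 1)   # shared prefixes are merged

    for pattern, support in tree.generatePatterns([]):
        print(pattern, support)       # [2] 2, [2, 0] 2, [1] 2, [1, 0] 2, [0] 3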
+
+
+
+[docs]
+class FTFPGrowth(_fp._faultTolerantFrequentPatterns):
+    """
+    :Description: FTFPGrowth is a fundamental algorithm to discover fault-tolerant frequent patterns in a transactional database.
+                  It stores the database in a compressed fp-tree, decreasing the memory usage, and extracts the
+                  patterns from the tree. It employs the downward closure property to reduce the search space effectively.
+
+    :Reference: Han, J., Pei, J., Yin, Y. et al. Mining Frequent Patterns without Candidate Generation: A Frequent-Pattern
+                Tree Approach. Data Mining and Knowledge Discovery 8, 53–87 (2004). https://doi.org/10.1023
+
+    :param iFile: str :
+                   Name of the Input file to mine the complete set of fault-tolerant frequent patterns
+    :param oFile: str :
+                   Name of the output file to store the complete set of fault-tolerant frequent patterns
+    :param minSup: float or int or str :
+                   The user can specify minSup either in count or proportion of database size.
+                   If the program detects the data type of minSup is integer, then it treats minSup as expressed in count.
+                   Otherwise, it will be treated as float.
+                   Example: minSup=10 will be treated as integer, while minSup=10.0 will be treated as float
+    :param sep: str :
+                   This variable is used to distinguish items from one another in a transaction. The default separator is tab space or \t.
+                   However, the users can override the default separator.
+
+
+    :Attributes:
+
+        startTime: float :
+            To record the start time of the mining process
+        endTime: float :
+            To record the completion time of the mining process
+        memoryUSS : float
+            To store the total amount of USS memory consumed by the program
+        memoryRSS : float
+            To store the total amount of RSS memory consumed by the program
+        Database : list
+            To store the transactions of a database in list
+        mapSupport : Dictionary
+            To maintain the information of items and their frequency
+        lno : int
+            it represents the total no of transactions
+        tree : class
+            it represents the Tree class
+        finalPatterns : dict
+            it represents to store the patterns
+
+    :Methods:
+
+        mine()
+            Mining process will start from here
+        getPatterns()
+            Complete set of patterns will be retrieved with this function
+        save(oFile)
+            Complete set of frequent patterns will be written to an output file
+        getPatternsAsDataFrame()
+            Complete set of frequent patterns will be loaded in to a dataframe
+        getMemoryUSS()
+            Total amount of USS memory consumed by the mining process will be retrieved from this function
+        getMemoryRSS()
+            Total amount of RSS memory consumed by the mining process will be retrieved from this function
+        getRuntime()
+            Total amount of runtime taken by the mining process will be retrieved from this function
+        creatingItemSets()
+            Scans the dataset or dataframes and stores in list format
+        frequentOneItem()
+            Extracts the one-frequent patterns from transactions
+
+    **Executing the code on terminal:**
+    ----------------------------------------
+
+    .. code-block:: console
+
+      Format:
+
+      (.venv) $ python3 FTFPGrowth.py <inputFile> <outputFile> <minSup> <itemSup> <minLength> <faultTolerance>
+
+      Example Usage:
+
+      (.venv) $ python3 FTFPGrowth.py sampleDB.txt patterns.txt 10.0 5.0 3 1
+
+    .. note:: minSup can be specified either as a count or as a proportion of the database transactions
+
+
+    **Sample run of the importing code:**
+    -------------------------------------------
+    ..
code-block:: python + + from PAMI.faultTolerantFrequentPattern.basic import FTFPGrowth as alg + + obj = alg.FTFPGrowth(inputFile,minSup,itemSup,minLength,faultTolerance) + + obj.mine() + + patterns = obj.getPatterns() + + print("Total number of Frequent Patterns:", len(patterns)) + + obj.save(oFile) + + Df = obj.getPatternInDataFrame() + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + **Credits:** + --------------- + The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.\n + + """ + + __startTime = float() + __endTime = float() + _minSup = str() + __finalPatterns = {} + _iFile = " " + _oFile = " " + _sep = " " + __memoryUSS = float() + __memoryRSS = float() + __Database = [] + __mapSupport = {} + __lno = 0 + __tree = _Tree() + __rank = {} + __rankDup = {} + + def __init__(self, iFile: Union[str, pd.DataFrame], minSup: Union[int, float, str], itemSup: float, minLength: int, faultTolerance: int, sep: str='\t') -> None: + super().__init__(iFile, minSup, itemSup, minLength, faultTolerance, sep) + + def __creatingItemSets(self) -> None: + """ + Storing the complete transactions of the database/input file in a database variable + """ + self.__Database = [] + if isinstance(self._iFile, _fp._pd.DataFrame): + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'Transactions' in i: + self.__Database = self._iFile['Transactions'].tolist() + + # print(self.Database) + if isinstance(self._iFile, str): + if _fp._validators.url(self._iFile): + data = _fp._urlopen(self._iFile) + for line in data: + line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self.__Database.append(temp) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line.strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self.__Database.append(temp) + except IOError: + print("File Not Found") + quit() + + def __convert(self, value: Union[int, float, str]) -> Union[int, float]: + """ + + To convert the type of user specified minSup value + + :param value: user specified minSup value + + :type value: Union[int, float, str] + + :return: converted type + + :rtype: int, float + + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self.__Database) * value) + if type(value) is str: + if '.' 
in value: + value = float(value) + value = (len(self.__Database) * value) + else: + value = int(value) + return value + + def __frequentOneItem(self) -> List[str]: + """ + + Generating One frequent items sets + + :return: one frequency items set + + :rtype: List + + """ + self.__mapSupport = {} + for tr in self.__Database: + for i in range(0, len(tr)): + if tr[i] not in self.__mapSupport: + self.__mapSupport[tr[i]] = 1 + else: + self.__mapSupport[tr[i]] += 1 + self.__mapSupport = {k: v for k, v in self.__mapSupport.items() if v >= self._minSup} + genList = [k for k, v in sorted(self.__mapSupport.items(), key=lambda x: x[1], reverse=True)] + self.__rank = dict([(index, item) for (item, index) in enumerate(genList)]) + return genList + + def __updateTransactions(self, itemSet: List[str]) -> List[List[int]]: + """ + Updates the items in transactions with rank of items according to their support + + :Example: oneLength = {'a':7, 'b': 5, 'c':'4', 'd':3} + rank = {'a':0, 'b':1, 'c':2, 'd':3} + + :param itemSet: list of one-frequent items + + :type itemSet: List + + :return: list of updated items in transactions with rank of items according to their support + + :rtype: List + + """ + list1 = [] + for tr in self.__Database: + list2 = [] + for i in range(len(tr)): + if tr[i] in itemSet: + list2.append(self.__rank[tr[i]]) + if len(list2) >= 1: + list2.sort() + list1.append(list2) + return list1 + + @staticmethod + def __buildTree(transactions: List[List[int]], info: Dict[int, int]) -> _Tree: + """ + Builds the tree with updated transaction + + :param transactions: updated transactions + + :type transactions: List + + :param info: support details of each item in transactions + + :type info: Dict + + :returns: transactions compressed in fp-tree + + :rtype: Tree + + """ + rootNode = _Tree() + rootNode.info = info.copy() + for i in range(len(transactions)): + rootNode.addTransaction(transactions[i], 1) + return rootNode + + def __savePeriodic(self, itemSet: List[int]) -> str: + """ + The duplication items and their ranks + + :param itemSet: frequent itemSet that generated + + :type itemSet: List + + :returns: patterns with original item names + + :rtype: String + + """ + temp = str() + for i in itemSet: + temp = temp + self.__rankDup[i] + "\t" + return temp + +
+[docs] + @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.") + def startMine(self) -> None: + """ + Main program to start the operation + """ + self.mine()
+ + +
+[docs] + def mine(self) -> None: + """ + Main program to start the operation + """ + global _minSup + self.__startTime = _fp._time.time() + if self._iFile is None: + raise Exception("Please enter the file path or file name:") + if self._minSup is None: + raise Exception("Please enter the Minimum Support") + self.__creatingItemSets() + self._minSup = self.__convert(self._minSup) + _minSup = self._minSup + itemSet = self.__frequentOneItem() + updatedTransactions = self.__updateTransactions(itemSet) + for x, y in self.__rank.items(): + self.__rankDup[y] = x + info = {self.__rank[k]: v for k, v in self.__mapSupport.items()} + __Tree = self.__buildTree(updatedTransactions, info) + patterns = __Tree.generatePatterns([]) + self.__finalPatterns = {} + for k in patterns: + s = self.__savePeriodic(k[0]) + self.__finalPatterns[str(s)] = k[1] + print("Frequent patterns were generated successfully using frequentPatternGrowth algorithm") + self.__endTime = _fp._time.time() + self.__memoryUSS = float() + self.__memoryRSS = float() + process = _fp._psutil.Process(_fp._os.getpid()) + self.__memoryUSS = process.memory_full_info().uss + self.__memoryRSS = process.memory_info().rss
+ + +
+[docs] + def getMemoryUSS(self) -> float: + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + + :rtype: float + + """ + + return self.__memoryUSS
+ + +
+[docs] + def getMemoryRSS(self) -> float: + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + + :rtype: float + + """ + + return self.__memoryRSS
+ + +
+[docs] + def getRuntime(self) -> float: + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + + :rtype: float + + """ + + return self.__endTime - self.__startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self) -> pd.DataFrame: + """ + Storing final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + + :rtype: pd.DataFrame + + """ + + dataframe = {} + data = [] + for a, b in self.__finalPatterns.items(): + data.append([a.replace('\t', ' '), b]) + dataframe = _fp._pd.DataFrame(data, columns=['Patterns', 'Support']) + return dataframe
+ + +
+
+[docs]
+    def save(self, outFile: str) -> None:
+        """
+        Complete set of frequent patterns will be written to an output file
+
+        :param outFile: name of the output file
+
+        :type outFile: csv file
+
+        :return: None
+
+        """
+        self._oFile = outFile
+        with open(self._oFile, 'w+') as writer:
+            for x, y in self.__finalPatterns.items():
+                s1 = x.strip() + ":" + str(y)
+                writer.write("%s \n" % s1)
+ + +
+[docs] + def getPatterns(self) -> Dict[str, int]: + """ + Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + + :rtype: dict + + """ + return self.__finalPatterns
+ + +
+
+[docs]
+    def printResults(self) -> None:
+        """
+        This function is used to print the results
+        """
+        print("Total number of Frequent Patterns:", len(self.getPatterns()))
+        print("Total Memory in USS:", self.getMemoryUSS())
+        print("Total Memory in RSS:", self.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_fp._sys.argv) == 7 or len(_fp._sys.argv) == 8:
+        if len(_fp._sys.argv) == 8:
+            _ap = FTFPGrowth(_fp._sys.argv[1], _fp._sys.argv[3], _fp._sys.argv[4],
+                             _fp._sys.argv[5], _fp._sys.argv[6], _fp._sys.argv[7])
+        if len(_fp._sys.argv) == 7:
+            _ap = FTFPGrowth(_fp._sys.argv[1], _fp._sys.argv[3], _fp._sys.argv[4], _fp._sys.argv[5], _fp._sys.argv[6])
+        _ap.mine()
+        print("Total number of Frequent Patterns:", len(_ap.getPatterns()))
+        _ap.save(_fp._sys.argv[2])
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS:", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        print("Error! The number of input parameters does not match the total number of parameters required")
+
\ No newline at end of file
diff --git a/sphinx/_build/html/_modules/PAMI/frequentPattern/basic/Apriori.html b/sphinx/_build/html/_modules/PAMI/frequentPattern/basic/Apriori.html
new file mode 100644
index 000000000..d5cd0b78c
--- /dev/null
+++ b/sphinx/_build/html/_modules/PAMI/frequentPattern/basic/Apriori.html
@@ -0,0 +1,534 @@
+PAMI.frequentPattern.basic.Apriori — PAMI 2024.04.23 documentation
Source code for PAMI.frequentPattern.basic.Apriori

+# Apriori is one of the fundamental algorithm to discover frequent patterns in a transactional database. This program employs apriori property (or downward closure property) to  reduce the search space effectively. This algorithm employs breadth-first search technique to find the complete set of frequent patterns in a transactional database.
+#
+# **Importing this algorithm into a python program**
+# ----------------------------------------------------
+#
+#             import PAMI.frequentPattern.basic.Apriori as alg
+#
+#             obj = alg.Apriori(iFile, minSup)
+#
+#             obj.mine()
+#
+#             frequentPatterns = obj.getPatterns()
+#
+#             print("Total number of Frequent Patterns:", len(frequentPatterns))
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+from PAMI.frequentPattern.basic import abstract as _ab
+from typing import Dict, Union
+from deprecated import deprecated
+
+
+
+
+[docs]
+class Apriori(_ab._frequentPatterns):
+    """
+    :Description: Apriori is one of the fundamental algorithms to discover frequent patterns in a transactional database. This program employs the apriori property (or downward closure property) to reduce the search space effectively. This algorithm employs a breadth-first search technique to find the complete set of frequent patterns in a transactional database.
+
+    :Reference: Agrawal, R., Imieliński, T., Swami, A.: Mining association rules between sets of items in large databases.
+                In: SIGMOD. pp. 207–216 (1993), https://doi.org/10.1145/170035.170072
+
+    :param iFile: str :
+                   Name of the Input file to mine complete set of frequent patterns
+    :param oFile: str :
+                   Name of the output file to store complete set of frequent patterns
+    :param minSup: int or float or str :
+                   The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup as expressed in count. Otherwise, it will be treated as float.
+    :param sep: str :
+                   This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator.
+
+
+
+    :Attributes:
+
+        startTime : float
+          To record the start time of the mining process
+
+        endTime : float
+          To record the completion time of the mining process
+
+        finalPatterns : dict
+          Storing the complete set of patterns in a dictionary variable
+
+        memoryUSS : float
+          To store the total amount of USS memory consumed by the program
+
+        memoryRSS : float
+          To store the total amount of RSS memory consumed by the program
+
+        Database : list
+          To store the transactions of a database in list
+
+
+
+    **Methods to execute code on terminal**
+    ----------------------------------------------------
+
+    .. code-block:: console
+
+      Format:
+
+      (.venv) $ python3 Apriori.py <inputFile> <outputFile> <minSup>
+
+      Example Usage:
+
+      (.venv) $ python3 Apriori.py sampleDB.txt patterns.txt 10.0
+
+    .. note:: minSup will be considered in proportion of database transactions
+
+
+    **Importing this algorithm into a python program**
+    -----------------------------------------------------
+
+    .. code-block:: python
+
+            import PAMI.frequentPattern.basic.Apriori as alg
+
+            obj = alg.Apriori(iFile, minSup)
+
+            obj.mine()
+
+            frequentPatterns = obj.getPatterns()
+
+            print("Total number of Frequent Patterns:", len(frequentPatterns))
+
+            obj.save(oFile)
+
+            Df = obj.getPatternsAsDataFrame()
+
+            memUSS = obj.getMemoryUSS()
+
+            print("Total Memory in USS:", memUSS)
+
+            memRSS = obj.getMemoryRSS()
+
+            print("Total Memory in RSS", memRSS)
+
+            run = obj.getRuntime()
+
+            print("Total ExecutionTime in seconds:", run)
+
+
+    **Credits:**
+    -------------
+
+    The complete program was written by P. Likhitha under the supervision of Professor Rage Uday Kiran.
+ + """ + + _minSup = float() + _startTime = float() + _endTime = float() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _sep = " " + _memoryUSS = float() + _memoryRSS = float() + _Database = [] + + def _creatingItemSets(self) -> None: + """ + Storing the complete transactions of the database/input file in a database variable + """ + self._Database = [] + if isinstance(self._iFile, _ab._pd.DataFrame): + temp = [] + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'Transactions' in i: + temp = self._iFile['Transactions'].tolist() + + for k in temp: + self._Database.append(set(k)) + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(set(temp)) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line.strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(set(temp)) + except IOError: + print("File Not Found") + quit() + + def _convert(self, value: Union[int, float, str]) -> Union[int, float]: + """ + To convert the user specified minSup value + + :param value: user specified minSup value + + :type value: int or float or str + + :return: converted type + + :rtype: int or float + + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self._Database) * value) + if type(value) is str: + if '.' in value: + value = float(value) + value = (len(self._Database) * value) + else: + value = int(value) + return value + +
+[docs] + @deprecated( + "It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.") + def startMine(self) -> None: + """ + Frequent pattern mining process will start from here + """ + self.mine()
+ + +
+[docs] + def mine(self) -> None: + """ + Frequent pattern mining process will start from here + """ + self._Database = [] + self._startTime = _ab._time.time() + + self._creatingItemSets() + + self._minSup = self._convert(self._minSup) + + items = {} + index = 0 + for line in self._Database: + for item in line: + if tuple([item]) in items: + items[tuple([item])].append(index) + else: + items[tuple([item])] = [index] + index += 1 + + # sort by length in descending order + items = dict(sorted(items.items(), key=lambda x: len(x[1]), reverse=True)) + + cands = [] + fileData = {} + for key in items: + if len(items[key]) >= self._minSup: + cands.append(key) + self._finalPatterns["\t".join(key)] = len(items[key]) + fileData[key] = set(items[key]) + else: + break + + while cands: + newKeys = [] + for i in range(len(cands)): + for j in range(i + 1, len(cands)): + if cands[i][:-1] == cands[j][:-1]: + newCand = cands[i] + tuple([cands[j][-1]]) + intersection = fileData[tuple([newCand[0]])] + for k in range(1, len(newCand)): + intersection = intersection.intersection(fileData[tuple([newCand[k]])]) + if len(intersection) >= self._minSup: + newKeys.append(newCand) + newCand = "\t".join(newCand) + self._finalPatterns[newCand] = len(intersection) + del cands + cands = newKeys + del newKeys + + process = _ab._psutil.Process(_ab._os.getpid()) + self._endTime = _ab._time.time() + self._memoryUSS = float() + self._memoryRSS = float() + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss + print("Frequent patterns were generated successfully using Apriori algorithm ")
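The join step in mine() above pairs candidates that agree on all but their last item and intersects their transaction-id sets; the support of the joined candidate is the size of that intersection. A compact standalone sketch of the idea, with made-up data:

.. code-block:: python

    # Illustrative sketch of the prefix join with tid-set intersection
    tidSets = {('a',): {0, 1, 2}, ('b',): {0, 1}, ('c',): {1, 2}}
    minSup = 2

    cands = sorted(tidSets)                   # [('a',), ('b',), ('c',)]
    frequent = dict(tidSets)
    for i in range(len(cands)):
        for j in range(i + 1, len(cands)):
            if cands[i][:-1] == cands[j][:-1]:        # equal prefixes can join
                newCand = cands[i] + (cands[j][-1],)
                tids = frequent[cands[i]] & frequent[cands[j]]
                if len(tids) >= minSup:               # support = |intersection|
                    frequent[newCand] = tids

    print({k: len(v) for k, v in frequent.items()})
    # {('a',): 3, ('b',): 2, ('c',): 2, ('a', 'b'): 2, ('a', 'c'): 2}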
+ + +
+[docs] + def getMemoryUSS(self) -> float: + """ + + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + + :rtype: float + + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self) -> float: + """ + + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + + :rtype: float + + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self) -> float: + """ + + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + + :rtype: float + + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self) -> _ab._pd.DataFrame: + """ + + Storing final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + + :rtype: pd.DataFrame + + """ + + dataFrame = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a.replace('\t', ' '), b]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support']) + # dataFrame = dataFrame.replace(r'\r+|\n+|\t+',' ', regex=True) + return dataFrame
+ + +
+
+[docs]
+    def save(self, outFile) -> None:
+        """
+
+        Complete set of frequent patterns will be written to an output file
+
+        :param outFile: name of the output file
+
+        :type outFile: csvfile
+
+        :return: None
+
+        """
+        self._oFile = outFile
+        with open(self._oFile, 'w+') as writer:
+            for x, y in self._finalPatterns.items():
+                s1 = x.strip() + ":" + str(y)
+                writer.write("%s \n" % s1)
+ + +
+[docs] + def getPatterns(self) -> Dict[str, int]: + """ + + Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + + :rtype: dict + + """ + return self._finalPatterns
+ + +
+
+[docs]
+    def printResults(self) -> None:
+        """
+        This function is used to print the results
+        """
+        print("Total number of Frequent Patterns:", len(self.getPatterns()))
+        print("Total Memory in USS:", self.getMemoryUSS())
+        print("Total Memory in RSS:", self.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_ab._sys.argv) == 4 or len(_ab._sys.argv) == 5:
+        if len(_ab._sys.argv) == 5:
+            _ap = Apriori(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4])
+        if len(_ab._sys.argv) == 4:
+            _ap = Apriori(_ab._sys.argv[1], _ab._sys.argv[3])
+        _ap.mine()
+        print("Total number of Frequent Patterns:", len(_ap.getPatterns()))
+        _ap.save(_ab._sys.argv[2])
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS:", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        print("Error! The number of input parameters does not match the total number of parameters required")
+
\ No newline at end of file
diff --git a/sphinx/_build/html/_modules/PAMI/frequentPattern/basic/ECLAT.html b/sphinx/_build/html/_modules/PAMI/frequentPattern/basic/ECLAT.html
new file mode 100644
index 000000000..a37d879de
--- /dev/null
+++ b/sphinx/_build/html/_modules/PAMI/frequentPattern/basic/ECLAT.html
@@ -0,0 +1,549 @@
+PAMI.frequentPattern.basic.ECLAT — PAMI 2024.04.23 documentation
Source code for PAMI.frequentPattern.basic.ECLAT

+# ECLAT is one of the fundamental algorithm to discover frequent patterns in a transactional database.
+#
+# **Importing this algorithm into a python program**
+# ------------------------------------------------------------------
+#
+#             import PAMI.frequentPattern.basic.ECLAT as alg
+#
+#             obj = alg.ECLAT(iFile, minSup)
+#
+#             obj.mine()
+#
+#             frequentPatterns = obj.getPatterns()
+#
+#             print("Total number of Frequent Patterns:", len(frequentPatterns))
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+from PAMI.frequentPattern.basic import abstract as _ab
+from typing import List, Dict, Tuple, Set, Union, Any, Generator
+from deprecated import deprecated
+
+
+[docs] +class ECLAT(_ab._frequentPatterns): + """ + :Description: ECLAT is one of the fundamental algorithm to discover frequent patterns in a transactional database. + + :Reference: Mohammed Javeed Zaki: Scalable Algorithms for Association Mining. IEEE Trans. Knowl. Data Eng. 12(3): + 372-390 (2000), https://ieeexplore.ieee.org/document/846291 + + :param iFile: str : + Name of the Input file to mine complete set of frequent pattern's + :param oFile: str : + Name of the output file to store complete set of frequent patterns + :param minSup: int or float or str : + The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup is expressed in count. + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + :Attributes: + + startTime : float + To record the start time of the mining process + + endTime : float + To record the completion time of the mining process + + finalPatterns : dict + Storing the complete set of patterns in a dictionary variable + + memoryUSS : float + To store the total amount of USS memory consumed by the program + + memoryRSS : float + To store the total amount of RSS memory consumed by the program + + Database : list + To store the transactions of a database in list + + + **Methods to execute code on terminal** + ------------------------------------------ + + .. code-block:: console + + Format: + + (.venv) $ python3 ECLAT.py <inputFile> <outputFile> <minSup> + + Example Usage: + + (.venv) $ python3 ECLAT.py sampleDB.txt patterns.txt 10.0 + + .. note:: minSup will be considered in percentage of database transactions + + + **Importing this algorithm into a python program** + ------------------------------------------------------------------ + .. code-block:: python + + import PAMI.frequentPattern.basic.ECLAT as alg + + obj = alg.ECLAT(iFile, minSup) + + obj.mine() + + frequentPatterns = obj.getPatterns() + + print("Total number of Frequent Patterns:", len(frequentPatterns)) + + obj.save(oFile) + + Df = obj.getPatternInDataFrame() + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + + **Credits:** + ---------------------- + + The complete program was written by Kundai under the supervision of Professor Rage Uday Kiran. 
+ + """ + + _minSup = float() + _startTime = float() + _endTime = float() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _sep = " " + _memoryUSS = float() + _memoryRSS = float() + _Database = [] + + def _creatingItemSets(self) -> float: + """ + Storing the complete transactions of the database/input file in a database variable + + :return: the complete transactions of the database/input file in a database variable + + :rtype: float + """ + self._Database = [] + if isinstance(self._iFile, _ab._pd.DataFrame): + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'Transactions' in i: + self._Database = self._iFile['Transactions'].tolist() + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line.strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + except IOError: + print("File Not Found") + quit() + + def _getUniqueItemList(self) -> list: + """ + + Generating one frequent patterns + + :return: list of unique patterns + + :rtype: list + + """ + self._finalPatterns = {} + candidate = {} + uniqueItem = [] + for i in range(len(self._Database)): + for j in range(len(self._Database[i])): + if self._Database[i][j] not in candidate: + candidate[self._Database[i][j]] = {i} + else: + candidate[self._Database[i][j]].add(i) + for key, value in candidate.items(): + supp = len(value) + if supp >= self._minSup: + self._finalPatterns[key] = [value] + uniqueItem.append(key) + uniqueItem.sort() + return uniqueItem + + def _generateFrequentPatterns(self, candidateFrequent: list) -> None: + """ + + It will generate the combinations of frequent items + + :param candidateFrequent :it represents the items with their respective transaction identifiers + + :type candidateFrequent: list + + :return: None + + """ + new_freqList = [] + for i in range(0, len(candidateFrequent)): + item1 = candidateFrequent[i] + i1_list = item1.split() + for j in range(i + 1, len(candidateFrequent)): + item2 = candidateFrequent[j] + i2_list = item2.split() + if i1_list[:-1] == i2_list[:-1]: + interSet = self._finalPatterns[item1][0].intersection(self._finalPatterns[item2][0]) + if len(interSet) >= self._minSup: + newKey = item1 + "\t" + i2_list[-1] + self._finalPatterns[newKey] = [interSet] + new_freqList.append(newKey) + else: break + + if len(new_freqList) > 0: + self._generateFrequentPatterns(new_freqList) + + def _convert(self, value) -> float: + """ + + To convert the user specified minSup value + + :param value: user specified minSup value + + :return: converted type + + :rtype: float + + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self._Database) * value) + if type(value) is str: + if '.' in value: + value = float(value) + value = (len(self._Database) * value) + else: + value = int(value) + return value + +
+[docs] + @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.") + def startMine(self) -> None: + """ + Frequent pattern mining process will start from here + """ + + self.mine()
+ + +
+[docs] + def mine(self) -> None: + """ + Frequent pattern mining process will start from here + """ + + self._startTime = _ab._time.time() + if self._iFile is None: + raise Exception("Please enter the file path or file name:") + if self._minSup is None: + raise Exception("Please enter the Minimum Support") + self._creatingItemSets() + self._minSup = self._convert(self._minSup) + uniqueItemList = self._getUniqueItemList() + self._generateFrequentPatterns(uniqueItemList) + for x, y in self._finalPatterns.items(): + self._finalPatterns[x] = len(y[0]) + self._endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = float() + self._memoryRSS = float() + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss + print("Frequent patterns were generated successfully using ECLAT algorithm")
+ + +
+[docs] + def getMemoryUSS(self) -> float: + """ + + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + + :rtype: float + + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self) -> float: + """ + + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + + :rtype: float + + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self) -> float: + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self) -> _ab._pd.DataFrame: + """ + + Storing final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + + :rtype: pd.DataFrame + + """ + + dataFrame = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a.replace('\t', ' '), b]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support']) + return dataFrame
+ + +
+
+[docs]
+    def save(self, outFile: str) -> None:
+        """
+
+        Complete set of frequent patterns will be written to an output file
+
+        :param outFile: name of the output file
+
+        :type outFile: csvfile
+
+        :return: None
+
+        """
+        self._oFile = outFile
+        with open(self._oFile, 'w+') as writer:
+            for x, y in self._finalPatterns.items():
+                patternsAndSupport = x.strip() + ":" + str(y)
+                writer.write("%s \n" % patternsAndSupport)
+ + +
+[docs] + def getPatterns(self) -> dict: + """ + Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + + :rtype: dict + """ + return self._finalPatterns
+ + +
+
+[docs]
+    def printResults(self) -> None:
+        """
+        Function used to print the results
+        """
+        print("Total number of Frequent Patterns:", len(self.getPatterns()))
+        print("Total Memory in USS:", self.getMemoryUSS())
+        print("Total Memory in RSS:", self.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_ab._sys.argv) == 4 or len(_ab._sys.argv) == 5:
+        if len(_ab._sys.argv) == 5:
+            _ap = ECLAT(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4])
+        if len(_ab._sys.argv) == 4:
+            _ap = ECLAT(_ab._sys.argv[1], _ab._sys.argv[3])
+        _ap.mine()
+        print("Total number of Frequent Patterns:", len(_ap.getPatterns()))
+        _ap.save(_ab._sys.argv[2])
+        print(_ap.getPatternsAsDataFrame())
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS:", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        print("Error! The number of input parameters does not match the total number of parameters required")
+
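As a recap of the vertical-mining idea behind _getUniqueItemList above, this illustrative snippet (made-up transactions) builds the item-to-tidset index from a horizontal database; itemset supports then come from intersecting tidsets:

.. code-block:: python

    # Illustrative: horizontal database -> vertical (tidset) layout
    database = [['a', 'b'], ['a', 'c'], ['a', 'b', 'c'], ['b']]

    vertical = {}
    for tid, transaction in enumerate(database):
        for item in transaction:
            vertical.setdefault(item, set()).add(tid)

    print(vertical)                            # {'a': {0, 1, 2}, 'b': {0, 2, 3}, 'c': {1, 2}}
    print(len(vertical['a'] & vertical['b']))  # support of {a, b} -> 2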
\ No newline at end of file
diff --git a/sphinx/_build/html/_modules/PAMI/frequentPattern/basic/ECLATDiffset.html b/sphinx/_build/html/_modules/PAMI/frequentPattern/basic/ECLATDiffset.html
new file mode 100644
index 000000000..46d6b90fa
--- /dev/null
+++ b/sphinx/_build/html/_modules/PAMI/frequentPattern/basic/ECLATDiffset.html
@@ -0,0 +1,530 @@
+PAMI.frequentPattern.basic.ECLATDiffset — PAMI 2024.04.23 documentation
Source code for PAMI.frequentPattern.basic.ECLATDiffset

+# ECLATDiffset uses diffsets to extract the frequent patterns in a transactional database.
+
+# **Importing this algorithm into a python program**
+# ---------------------------------------------------------
+#
+#             import PAMI.frequentPattern.basic.ECLATDiffset as alg
+#
+#             obj = alg.ECLATDiffset(iFile, minSup)
+#
+#             obj.mine()
+#
+#             frequentPatterns = obj.getPatterns()
+#
+#             print("Total number of Frequent Patterns:", len(frequentPatterns))
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
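The diffset idea that distinguishes this algorithm from plain ECLAT can be illustrated with a small made-up example: instead of the tids that contain an itemset, a diffset stores the tids that do not, and supports follow by set difference (mirroring _runDeclat below):

.. code-block:: python

    # Illustrative diffset arithmetic on a made-up database with tids {1, 2, 3, 4}
    allTids = {1, 2, 3, 4}
    tids_a = {1, 2, 3}              # 'a' occurs in tids 1, 2, 3
    tids_b = {1, 3, 4}              # 'b' occurs in tids 1, 3, 4

    diff_a = allTids - tids_a       # {4}: tids missing 'a'
    diff_b = allTids - tids_b       # {2}: tids missing 'b'

    # diffset of 'ab' w.r.t. 'a': tids that contain 'a' but not 'b'
    diff_ab = diff_b - diff_a       # {2}
    support_ab = len(tids_a) - len(diff_ab)     # 3 - 1 = 2
    print(support_ab)               # equals |tids_a & tids_b| = |{1, 3}| = 2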
+
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+
+# from abstract import *
+
+from PAMI.frequentPattern.basic import abstract as _ab
+from deprecated import deprecated
+
+
+
+[docs] +class ECLATDiffset(_ab._frequentPatterns): + """ + :Description: ECLATDiffset uses diffset to extract the frequent patterns in a transactional database. + + :Reference: KDD '03: Proceedings of the ninth ACM SIGKDD international conference on Knowledge discovery and data mining + August 2003 Pages 326–335 https://doi.org/10.1145/956750.956788 + + :param iFile: str : + Name of the Input file to mine complete set of frequent pattern's + :param oFile: str : + Name of the output file to store complete set of frequent patterns + :param minSup: int or float or str : + The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup is expressed in count. + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + :Attributes: + + startTime : float + To record the start time of the mining process + + endTime : float + To record the completion time of the mining process + + finalPatterns : dict + Storing the complete set of patterns in a dictionary variable + + memoryUSS : float + To store the total amount of USS memory consumed by the program + + memoryRSS : float + To store the total amount of RSS memory consumed by the program + + Database : list + To store the transactions of a database in list + + + **Methods to execute code on terminal** + ------------------------------------------ + + .. code-block:: console + + Format: + + (.venv) $ python3 ECLATDiffset.py <inputFile> <outputFile> <minSup> + + Example Usage: + + (.venv) $ python3 ECLATDiffset.py sampleDB.txt patterns.txt 10.0 + + .. note:: minSup will be considered in percentage of database transactions + + + **Importing this algorithm into a python program** + --------------------------------------------------------- + .. code-block:: python + + import PAMI.frequentPattern.basic.ECLATDiffset as alg + + obj = alg.ECLATDiffset(iFile, minSup) + + obj.mine() + + frequentPatterns = obj.getPatterns() + + print("Total number of Frequent Patterns:", len(frequentPatterns)) + + obj.savePatterns(oFile) + + Df = obj.getPatternInDataFrame() + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + + **Credits:** + ------------------- + + The complete program was written by Kundai under the supervision of Professor Rage Uday Kiran. 
+ + """ + + _minSup = float() + _startTime = float() + _endTime = float() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _sep = " " + _memoryUSS = float() + _memoryRSS = float() + _Database = [] + _diffSets = {} + _trans_set = set() + + def _creatingItemSets(self): + """ + Storing the complete transactions of the database/input file in a database variable + """ + self._Database = [] + if isinstance(self._iFile, _ab._pd.DataFrame): + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'Transactions' in i: + self._Database = self._iFile['Transactions'].tolist() + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line.strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + except IOError: + print("File Not Found") + quit() + + def _convert(self, value): + """ + To convert the user specified minSup value + :param value: user specified minSup value + :return: converted type + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self._Database) * value) + if type(value) is str: + if '.' in value: + value = float(value) + value = (len(self._Database) * value) + else: + value = int(value) + return value + + def _getUniqueItemList(self): + + # tidSets will store all the initial tids + tidSets = {} + # uniqueItem will store all frequent 1 items + uniqueItem = [] + for line in self._Database: + transNum = 0 + # Database = [set([i.rstrip() for i in transaction.split('\t')]) for transaction in f] + for transaction in self._Database: + transNum += 1 + self._trans_set.add(transNum) + for item in transaction: + if item in tidSets: + tidSets[item].add(transNum) + else: + tidSets[item] = {transNum} + for key, value in tidSets.items(): + supp = len(value) + if supp >= self._minSup: + self._diffSets[key] = [supp, self._trans_set.difference(value)] + uniqueItem.append(key) + # for x, y in self._diffSets.items(): + # print(x, y) + uniqueItem.sort() + # print() + return uniqueItem + + def _runDeclat(self, candidateList): + """ + It will generate the combinations of frequent items + :param candidateList :it represents the items with their respective transaction identifiers + :type candidateList: list + :return: returning transaction dictionary + :rtype: dict + """ + + newList = [] + for i in range(0, len(candidateList)): + item1 = candidateList[i] + iList = item1.split() + for j in range(i + 1, len(candidateList)): + item2 = candidateList[j] + jList = item2.split() + if iList[:-1] == jList[:-1]: + unionDiffSet = self._diffSets[item2][1].difference(self._diffSets[item1][1]) + unionSup = self._diffSets[item1][0] - len(unionDiffSet) + if unionSup >= self._minSup: + newKey = item1 + "\t" + jList[-1] + self._diffSets[newKey] = [unionSup, unionDiffSet] + newList.append(newKey) + else: + break + + if len(newList) > 0: + self._runDeclat(newList) + +
+[docs] + @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.") + def startMine(self): + """ + Frequent pattern mining process will start from here + """ + + self.mine()
+ + +
+[docs] + def mine(self): + """ + Frequent pattern mining process will start from here + """ + + self._startTime = _ab._time.time() + self._Database = [] + self._finalPatterns = {} + self._diffSets = {} + self._trans_set = set() + if self._iFile is None: + raise Exception("Please enter the file path or file name:") + if self._minSup is None: + raise Exception("Please enter the Minimum Support") + self._creatingItemSets() + #print(len(self._Database)) + self._minSup = self._convert(self._minSup) + uniqueItemList = [] + uniqueItemList = self._getUniqueItemList() + self._runDeclat(uniqueItemList) + self._finalPatterns = self._diffSets + #print(len(self._finalPatterns), len(uniqueItemList)) + self._endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = float() + self._memoryRSS = float() + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss + print("Frequent patterns were generated successfully using ECLAT Diffset algorithm")
+ + +
+[docs] + def getMemoryUSS(self): + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self): + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self): + """ + Calculating the total amount of runtime taken by the mining process + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self): + """ + Storing final frequent patterns in a dataframe + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataFrame = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a.replace('\t', ' '), b[0]]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support']) + return dataFrame
+ + +
+[docs]
+    def save(self, outFile):
+        """
+        Complete set of frequent patterns will be written to an output file
+        :param outFile: name of the output file
+        :type outFile: csv file
+        """
+        self._oFile = outFile
+        with open(self._oFile, 'w+') as writer:
+            for x, y in self._finalPatterns.items():
+                patternsAndSupport = x.strip() + ":" + str(y[0])
+                writer.write("%s \n" % patternsAndSupport)
+ + +
+[docs] + def getPatterns(self): + """ + Function to send the set of frequent patterns after completion of the mining process + :return: returning frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs]
+    def printResults(self):
+        """
+        This function is used to print the results
+        """
+        print("Total number of Frequent Patterns:", len(self.getPatterns()))
+        print("Total Memory in USS:", self.getMemoryUSS())
+        print("Total Memory in RSS:", self.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_ab._sys.argv) == 4 or len(_ab._sys.argv) == 5:
+        if len(_ab._sys.argv) == 5:
+            _ap = ECLATDiffset(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4])
+        if len(_ab._sys.argv) == 4:
+            _ap = ECLATDiffset(_ab._sys.argv[1], _ab._sys.argv[3])
+        _ap.mine()
+        print("Total number of Frequent Patterns:", len(_ap.getPatterns()))
+        _ap.save(_ab._sys.argv[2])
+        print(_ap.getPatternsAsDataFrame())
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS:", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        print("Error! The number of input parameters does not match the total number of parameters provided")
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/frequentPattern/basic/ECLATbitset.html b/sphinx/_build/html/_modules/PAMI/frequentPattern/basic/ECLATbitset.html new file mode 100644 index 000000000..c46c34b51 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/frequentPattern/basic/ECLATbitset.html @@ -0,0 +1,522 @@ + + + + + + PAMI.frequentPattern.basic.ECLATbitset — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for PAMI.frequentPattern.basic.ECLATbitset

+# ECLATbitset is one of the fundamental algorithms for discovering frequent patterns in a transactional database.
+#
+# **Importing this algorithm into a python program**
+# ---------------------------------------------------------
+#
+#             import PAMI.frequentPattern.basic.ECLATbitset as alg
+#
+#             obj = alg.ECLATbitset(iFile, minSup)
+#
+#             obj.mine()
+#
+#             frequentPatterns = obj.getPatterns()
+#
+#             print("Total number of Frequent Patterns:", len(frequentPatterns))
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+from PAMI.frequentPattern.basic import abstract as _ab
+from deprecated import deprecated
+
+
+
+[docs]
+class ECLATbitset(_ab._frequentPatterns):
+    """
+    :Description: ECLATbitset is one of the fundamental algorithms for discovering frequent patterns in a transactional database.
+
+    :Reference: Mohammed Javeed Zaki: Scalable Algorithms for Association Mining. IEEE Trans. Knowl. Data Eng. 12(3):
+                372-390 (2000), https://ieeexplore.ieee.org/document/846291
+
+    :param iFile: str :
+                  Name of the Input file to mine the complete set of frequent patterns
+    :param oFile: str :
+                  Name of the output file to store the complete set of frequent patterns
+    :param minSup: int or float or str :
+                   The user can specify minSup either in count or proportion of database size. If the program detects that the data type of minSup is integer, then it treats minSup as a count.
+    :param sep: str :
+                This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, users can override the default separator.
+
+    :Attributes:
+
+        startTime : float
+            To record the start time of the mining process
+
+        endTime : float
+            To record the completion time of the mining process
+
+        finalPatterns : dict
+            Storing the complete set of patterns in a dictionary variable
+
+        memoryUSS : float
+            To store the total amount of USS memory consumed by the program
+
+        memoryRSS : float
+            To store the total amount of RSS memory consumed by the program
+
+        Database : list
+            To store the transactions of a database in a list
+
+
+    **Methods to execute code on terminal**
+    ------------------------------------------
+
+    .. code-block:: console
+
+      Format:
+
+      (.venv) $ python3 ECLATbitset.py <inputFile> <outputFile> <minSup>
+
+      Example Usage:
+
+      (.venv) $ python3 ECLATbitset.py sampleDB.txt patterns.txt 10.0
+
+    .. note:: minSup will be considered in percentage of database transactions
+
+
+    **Importing this algorithm into a python program**
+    ---------------------------------------------------------
+    .. code-block:: python
+
+            import PAMI.frequentPattern.basic.ECLATbitset as alg
+
+            obj = alg.ECLATbitset(iFile, minSup)
+
+            obj.mine()
+
+            frequentPatterns = obj.getPatterns()
+
+            print("Total number of Frequent Patterns:", len(frequentPatterns))
+
+            obj.save(oFile)
+
+            Df = obj.getPatternsAsDataFrame()
+
+            memUSS = obj.getMemoryUSS()
+
+            print("Total Memory in USS:", memUSS)
+
+            memRSS = obj.getMemoryRSS()
+
+            print("Total Memory in RSS", memRSS)
+
+            run = obj.getRuntime()
+
+            print("Total ExecutionTime in seconds:", run)
+
+    **Credits:**
+    -------------------
+
+    The complete program was written by Yudai Masu under the supervision of Professor Rage Uday Kiran.
+
+    """
+
+    _startTime = float()
+    _endTime = float()
+    _finalPatterns = {}
+    _iFile = " "
+    _oFile = " "
+    _sep = " "
+    _minSup = str()
+    _memoryUSS = float()
+    _memoryRSS = float()
+    _Database = []
+    _mapSupport = {}
+    _lno = 0
+
+    def _convert(self, value):
+        """
+        To convert the user specified minSup value
+
+        :param value: user specified minSup value
+
+        :type value: int or float or str
+
+        :return: converted value
+
+        :rtype: int or float
+        """
+        if type(value) is int:
+            value = int(value)
+        if type(value) is float:
+            value = (len(self._Database) * value)
+        if type(value) is str:
+            if '.' in value:
+                value = float(value)
+                value = (len(self._Database) * value)
+            else:
+                value = int(value)
+        return value
+
+    def _creatingItemSets(self):
+        """
+        Storing the complete transactions of the database/input file in a database variable
+        """
+        self._Database = []
+        self._mapSupport = {}
+        if isinstance(self._iFile, _ab._pd.DataFrame):
+            if self._iFile.empty:
+                print("The input DataFrame is empty")
+            i = self._iFile.columns.values.tolist()
+            if 'Transactions' in i:
+                self._Database = self._iFile['Transactions'].tolist()
+
+        if isinstance(self._iFile, str):
+            if _ab._validators.url(self._iFile):
+                data = _ab._urlopen(self._iFile)
+                for line in data:
+                    line = line.decode("utf-8").strip()
+                    temp = [i.rstrip() for i in line.split(self._sep)]
+                    temp = [x for x in temp if x]
+                    self._Database.append(temp)
+            else:
+                try:
+                    with open(self._iFile, 'r') as f:
+                        for line in f:
+                            self._lno += 1
+                            splitter = [i.rstrip() for i in line.split(self._sep)]
+                            splitter = [x for x in splitter if x]
+                            self._Database.append(splitter)
+                except IOError:
+                    print("File Not Found")
+        self._minSup = self._convert(self._minSup)
+
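+    # A standalone mirror of the _convert logic above, for illustration only
+    # (hypothetical helper, not used by the algorithm): minSup given as an int
+    # is a raw count, given as a float (or a string containing '.') it is a
+    # proportion of the database size, and any other string is parsed as a count.
+    @staticmethod
+    def _convertExample(value, dbSize):
+        # int -> absolute count, e.g. _convertExample(10, 100) -> 10
+        if type(value) is int:
+            return value
+        # float -> proportion, e.g. _convertExample(0.05, 100) -> 5.0
+        if type(value) is float:
+            return dbSize * value
+        # str -> proportion if it contains '.', else an absolute count
+        if '.' in value:
+            return dbSize * float(value)
+        return int(value)
+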
+[docs]
+    @deprecated(
+        "It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.")
+    def startMine(self):
+        """
+        Frequent pattern mining process will start from here.
+        We start by scanning the itemsets and storing their bitsets respectively.
+        We then form combinations of single items and check them against the minSup condition to determine the frequency of patterns.
+        """
+        self.mine()
+
+
+    def _bitPacker(self, data, maxIndex):
+        """
+        It takes a tid list and maxIndex as input and generates an integer as the output value.
+
+        :param data: the tid list of an item.
+
+        :type data: list
+
+        :param maxIndex: the anchor bit position used when converting the tids into bits (the number of transactions).
+
+        :type maxIndex: int
+        """
+        packed_bits = 0
+        for i in data:
+            packed_bits |= 1 << (maxIndex - i)
+
+        return packed_bits
+
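+    # Worked toy example of the bit packing above (illustration only): with
+    # maxIndex = 5 and tids packed from the left,
+    #
+    #     _bitPacker([0, 1, 3], 5) -> 0b110100 = 52   (item 'a' in tids 0, 1, 3)
+    #     _bitPacker([1, 3, 4], 5) -> 0b010110 = 22   (item 'b' in tids 1, 3, 4)
+    #
+    # The tid-list intersection then becomes a bitwise AND, and support is a
+    # popcount: 52 & 22 = 0b010100 = 20, and (20).bit_count() = 2, i.e. the
+    # candidate {a, b} occurs in the two shared tids 1 and 3 (Python 3.10+).
+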
+[docs]
+    def mine(self) -> None:
+        """
+        Frequent pattern mining process will start from here (bitset implementation)
+        """
+        self._startTime = _ab._time.time()
+
+        self._Database = []
+
+        self._creatingItemSets()
+
+        items = {}
+        index = 0
+        for line in self._Database:
+            for item in line:
+                if tuple([item]) in items:
+                    items[tuple([item])].append(index)
+                else:
+                    items[tuple([item])] = [index]
+            index += 1
+
+        # sort by support (tid-list length) in descending order
+        items = dict(sorted(items.items(), key=lambda x: len(x[1]), reverse=True))
+        cands = []
+        for key in items:
+            if len(items[key]) >= self._minSup:
+                self._finalPatterns["\t".join(key)] = len(items[key])
+                cands.append(key)
+                items[key] = self._bitPacker(items[key], index)
+            else:
+                break
+
+        while cands:
+            newCands = []
+            for i in range(len(cands)):
+                for j in range(i + 1, len(cands)):
+                    if cands[i][:-1] == cands[j][:-1]:
+                        newCand = tuple(cands[i] + tuple([cands[j][-1]]))
+                        intersection = items[tuple([newCand[0]])]
+                        for k in range(1, len(newCand)):
+                            intersection &= items[tuple([newCand[k]])]
+                        count = intersection.bit_count()
+                        if count >= self._minSup:
+                            newCands.append(newCand)
+                            newCand = "\t".join(newCand)
+                            self._finalPatterns[newCand] = count
+                    else:
+                        break
+
+            cands = newCands
+
+        self._endTime = _ab._time.time()
+        process = _ab._psutil.Process(_ab._os.getpid())
+        self._memoryUSS = float()
+        self._memoryRSS = float()
+        self._memoryUSS = process.memory_full_info().uss
+        self._memoryRSS = process.memory_info().rss
+        print("Frequent patterns were generated successfully using the ECLATbitset algorithm")
+ + +
+[docs] + def getMemoryUSS(self): + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self): + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self): + """ + Calculating the total amount of runtime taken by the mining process + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self): + """ + Storing final frequent patterns in a dataframe + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataFrame = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a.replace('\t', ' '), b]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support']) + return dataFrame
+ + +
+[docs]
+    def save(self, outFile):
+        """
+        Complete set of frequent patterns will be written to an output file
+        :param outFile: name of the output file
+        :type outFile: csv file
+        """
+        self._oFile = outFile
+        with open(self._oFile, 'w+') as writer:
+            for x, y in self._finalPatterns.items():
+                patternsAndSupport = x.strip() + ":" + str(y)
+                writer.write("%s \n" % patternsAndSupport)
+ + +
+[docs] + def getPatterns(self): + """ + Function to send the set of frequent patterns after completion of the mining process + :return: returning frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs]
+    def printResults(self):
+        """
+        This function is used to print the results
+        """
+        print("Total number of Frequent Patterns:", len(self.getPatterns()))
+        print("Total Memory in USS:", self.getMemoryUSS())
+        print("Total Memory in RSS:", self.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_ab._sys.argv) == 4 or len(_ab._sys.argv) == 5:
+        if len(_ab._sys.argv) == 5:
+            _ap = ECLATbitset(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4])
+        if len(_ab._sys.argv) == 4:
+            _ap = ECLATbitset(_ab._sys.argv[1], _ab._sys.argv[3])
+        _ap.mine()
+        print("Total number of Frequent Patterns:", len(_ap.getPatterns()))
+        _ap.save(_ab._sys.argv[2])
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS:", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        print("Error! The number of input parameters does not match the total number of parameters provided")
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/frequentPattern/basic/FPGrowth.html b/sphinx/_build/html/_modules/PAMI/frequentPattern/basic/FPGrowth.html new file mode 100644 index 000000000..3a4708293 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/frequentPattern/basic/FPGrowth.html @@ -0,0 +1,707 @@ + + + + + + PAMI.frequentPattern.basic.FPGrowth — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for PAMI.frequentPattern.basic.FPGrowth

+# FPGrowth is one of the fundamental algorithms for discovering frequent patterns in a transactional database. It stores the database in a compressed FP-tree, decreasing the memory usage, and extracts the patterns from the tree. It employs the downward closure property to reduce the search space effectively.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#             from PAMI.frequentPattern.basic import FPGrowth as alg
+#
+#             obj = alg.FPGrowth(iFile, minSup)
+#
+#             obj.mine()
+#
+#             frequentPatterns = obj.getPatterns()
+#
+#             print("Total number of Frequent Patterns:", len(frequentPatterns))
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+from PAMI.frequentPattern.basic import abstract as _fp
+from typing import List, Dict, Tuple, Set, Union, Any, Generator
+from deprecated import deprecated
+from itertools import combinations
+from collections import Counter
+
+_minSup = str()
+_fp._sys.setrecursionlimit(20000)
+
+
+class _Node:
+    """
+    A class used to represent the node of frequentPatternTree
+
+    :Attributes:
+
+        itemId: int
+            storing item of a node
+        counter: int
+            To maintain the support of node
+        parent: node
+            To maintain the parent of node
+        children: list
+            To maintain the children of node
+
+    :Methods:
+
+        addChild(node)
+            Updates the nodes children list and parent for the given node
+
+    """
+
+    def __init__(self, item, count, parent) -> None:
+        self.item = item
+        self.count = count
+        self.parent = parent
+        self.children = {}
+
+    def addChild(self, item, count = 1) -> Any:
+        """
+        Adds a child node to the current node with the specified item and count.
+
+        :param item: The item associated with the child node.
+        :type item: List
+
+        :param count: The count or support of the item. Default is 1.
+        :type count: int
+
+        :return: The child node added.
+        :rtype: List
+
+        """
+        if item not in self.children:
+            self.children[item] = _Node(item, count, self)
+        else:
+            self.children[item].count += count
+        return self.children[item]
+    
+    def traverse(self) -> Tuple[List[int], int]:
+        """
+        Traversing the tree to get the transaction
+
+        :return: transaction and count of each item in transaction
+
+        :rtype: Tuple, List and int
+        """
+        transaction = []
+        count = self.count
+        node = self.parent
+        while node.parent is not None:
+            transaction.append(node.item)
+            node = node.parent
+        return transaction[::-1], count
+
+
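+# A minimal standalone illustration of _Node (toy data, illustration only, not
+# part of the algorithm): addChild shares prefixes between transactions and
+# accumulates counts, while traverse walks from a node back to the root to
+# recover the node's prefix path together with its count.
+def _nodeDemo():
+    root = _Node([], 0, None)
+    n = root.addChild('a')             # transaction ('a', 'b')
+    n = n.addChild('b')
+    m = root.addChild('a')             # transaction ('a', 'c'): prefix 'a' is
+    m = m.addChild('c')                # shared, so root's child 'a' has count 2
+    assert root.children['a'].count == 2
+    assert m.traverse() == (['a'], 1)  # prefix path and count of node 'c'
+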
+
+[docs]
+class FPGrowth(_fp._frequentPatterns):
+    """
+
+    :Description: FPGrowth is one of the fundamental algorithms for discovering frequent patterns in a transactional database. It stores the database in a compressed FP-tree, decreasing the memory usage, and extracts the patterns from the tree. It employs the downward closure property to reduce the search space effectively.
+
+    :Reference: Han, J., Pei, J., Yin, Y. et al. Mining Frequent Patterns without Candidate Generation: A Frequent-Pattern
+                Tree Approach. Data Mining and Knowledge Discovery 8, 53–87 (2004). https://doi.org/10.1023
+
+    :param iFile: str :
+                  Name of the Input file to mine the complete set of frequent patterns
+    :param oFile: str :
+                  Name of the output file to store the complete set of frequent patterns
+    :param minSup: int or float or str :
+                   The user can specify minSup either in count or proportion of database size. If the program detects that the data type of minSup is integer, then it treats minSup as a count.
+    :param sep: str :
+                This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, users can override the default separator.
+
+
+
+    :Attributes:
+
+        startTime : float
+            To record the start time of the mining process
+
+        endTime : float
+            To record the completion time of the mining process
+
+        finalPatterns : dict
+            Storing the complete set of patterns in a dictionary variable
+
+        memoryUSS : float
+            To store the total amount of USS memory consumed by the program
+
+        memoryRSS : float
+            To store the total amount of RSS memory consumed by the program
+
+        Database : list
+            To store the transactions of a database in a list
+
+        mapSupport : dict
+            To maintain the information of items and their frequency
+
+        lno : int
+            it represents the total no of transactions
+
+        tree : class
+            it represents the Tree class
+
+        finalPatterns : dict
+            it represents to store the patterns
+
+
+    **Methods to execute code on terminal**
+    --------------------------------------------------------
+
+    .. code-block:: console
+
+      Format:
+
+      (.venv) $ python3 FPGrowth.py <inputFile> <outputFile> <minSup>
+
+      Example Usage:
+
+      (.venv) $ python3 FPGrowth.py sampleDB.txt patterns.txt 10.0
+
+    .. note:: minSup will be considered in percentage of database transactions
+
+
+    **Importing this algorithm into a python program**
+    --------------------------------------------------------
+    .. code-block:: python
+
+            from PAMI.frequentPattern.basic import FPGrowth as alg
+
+            obj = alg.FPGrowth(iFile, minSup)
+
+            obj.mine()
+
+            frequentPatterns = obj.getPatterns()
+
+            print("Total number of Frequent Patterns:", len(frequentPatterns))
+
+            obj.save(oFile)
+
+            Df = obj.getPatternsAsDataFrame()
+
+            memUSS = obj.getMemoryUSS()
+
+            print("Total Memory in USS:", memUSS)
+
+            memRSS = obj.getMemoryRSS()
+
+            print("Total Memory in RSS", memRSS)
+
+            run = obj.getRuntime()
+
+            print("Total ExecutionTime in seconds:", run)
+
+
+    **Credits:**
+    ----------------------------
+    The complete program was written by P. Likhitha under the supervision of Professor Rage Uday Kiran.
+ + """ + + __startTime = float() + __endTime = float() + _minSup = str() + __finalPatterns = {} + _iFile = " " + _oFile = " " + _sep = " " + __memoryUSS = float() + __memoryRSS = float() + __Database = [] + __mapSupport = {} + __lno = 0 + __rank = {} + __rankDup = {} + + def __init__(self, iFile, minSup, sep='\t') -> None: + super().__init__(iFile, minSup, sep) + + def __creatingItemSets(self) -> None: + """ + Storing the complete transactions of the database/input file in a database variable + """ + self.__Database = [] + if isinstance(self._iFile, _fp._pd.DataFrame): + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'Transactions' in i: + self.__Database = self._iFile['Transactions'].tolist() + + #print(self.Database) + if isinstance(self._iFile, str): + if _fp._validators.url(self._iFile): + data = _fp._urlopen(self._iFile) + for line in data: + line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self.__Database.append(temp) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line.strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self.__Database.append(temp) + except IOError: + print("File Not Found") + quit() + + def __convert(self, value) -> float: + """ + + To convert the type of user specified minSup value + + :param value: user specified minSup value + + :return: converted type + + :rtype: float + + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self.__Database) * value) + if type(value) is str: + if '.' in value: + value = float(value) + value = (len(self.__Database) * value) + else: + value = int(value) + return value + + def _construct(self, items, data, minSup): + """ + Constructs the FP-tree from the given transactions. + + :param items: A dictionary containing item frequencies. + :type items: Dict + + :param data: A list of transactions. + :type data: List + + :param minSup: The minimum support threshold. + :type minSup: int + + :return: The root node of the constructed FP-tree and a dictionary containing information about nodes associated with each item. + :rtype: Tuple[_Node, Dict] + """ + + items = {k: v for k, v in items.items() if v >= minSup} + + root = _Node([], 0, None) + itemNodes = {} + for line in data: + currNode = root + line = sorted([item for item in line if item in items], key = lambda x: items[x], reverse = True) + for item in line: + currNode = currNode.addChild(item) + if item in itemNodes: + itemNodes[item][0].add(currNode) + itemNodes[item][1] += 1 + else: + itemNodes[item] = [set([currNode]), 1] + + return root, itemNodes + + def _all_combinations(self, arr): + """ + Generates all possible combinations of items from a given transaction. + + :param arr: A list of items in a transaction. + :type arr: List + + :return: A list containing all possible combinations of items. + :rtype: List + + """ + + all_combinations_list = [] + for r in range(1, len(arr) + 1): + all_combinations_list.extend(combinations(arr, r)) + return all_combinations_list + + def _recursive(self, root, itemNode, minSup, patterns): + """ + + Recursively explores the FP-tree to generate frequent patterns. + + :param root: The root node of the current subtree. + :type root: _Node + + :param itemNode: A dictionary containing information about the nodes associated with each item. 
+ :type itemNode: Dict + + :param minSup: The minimum support threshold. + :type minSup: int + + :param patterns: A dictionary to store the generated frequent patterns. + :type patterns: Dict + + """ + itemNode = {k: v for k, v in sorted(itemNode.items(), key = lambda x: x[1][1])} + + for item in itemNode: + if itemNode[item][1] < self._minSup: + break + + newRoot = _Node(root.item + [item], 0, None) + pat = "\t".join([str(i) for i in newRoot.item]) + self.__finalPatterns[pat] = itemNode[item][1] + newItemNode = {} + + if len(itemNode[item][0]) == 1: + transaction, count = itemNode[item][0].pop().traverse() + if len(transaction) == 0: + continue + combination = self._all_combinations(transaction) + for comb in combination: + pat = "\t".join([str(i) for i in comb]) + pat = pat + "\t" + "\t".join([str(i) for i in newRoot.item]) + self.__finalPatterns[pat] = count + # self._finalPatterns[tuple(list(comb) + newRoot.item)] = count + pass + + + itemCount = {} + transactions = {} + for node in itemNode[item][0]: + transaction, count = node.traverse() + if len(transaction) == 0: + continue + if tuple(transaction) in transactions: + transactions[tuple(transaction)] += count + else: + transactions[tuple(transaction)] = count + + + for item in transaction: + if item in itemCount: + itemCount[item] += count + else: + itemCount[item] = count + + + # remove items that are below minSup + itemCount = {k: v for k, v in itemCount.items() if v >= minSup} + if len(itemCount) == 0: + continue + + for transaction, count in transactions.items(): + transaction = sorted([item for item in transaction if item in itemCount], key = lambda x: itemCount[x], reverse = True) + currNode = newRoot + for item in transaction: + currNode = currNode.addChild(item, count) + if item in newItemNode: + newItemNode[item][0].add(currNode) + newItemNode[item][1] += count + else: + newItemNode[item] = [set([currNode]), count] + + if len(newItemNode) < 1: + continue + + # mine(newRoot, newItemNode, minSup, patterns) + self._recursive(newRoot, newItemNode, minSup, patterns) + + +
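+    # Note on the single-path shortcut above (illustration only): when an
+    # item's conditional tree collapses to a single path, every subset of that
+    # path is frequent with the path's count, so _all_combinations enumerates
+    # the subsets directly instead of recursing. For a hypothetical path
+    # ['a', 'b'] with count 3 and suffix ['c'], the emitted patterns are
+    # a c:3, b c:3, and a b c:3 (items joined by tabs in the patterns dict).
+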
+[docs] + def mine(self) -> None: + """ + Main program to start the operation + """ + global _minSup + self.__startTime = _fp._time.time() + if self._iFile is None: + raise Exception("Please enter the file path or file name:") + if self._minSup is None: + raise Exception("Please enter the Minimum Support") + self.__creatingItemSets() + self._minSup = self.__convert(self._minSup) + _minSup = self._minSup + + itemCount = Counter() + for line in self.__Database: + itemCount.update(line) + + root, itemNode = self._construct(itemCount, self.__Database, self._minSup) + self._recursive(root, itemNode, self._minSup, self.__finalPatterns) + + print("Frequent patterns were generated successfully using frequentPatternGrowth algorithm") + self.__endTime = _fp._time.time() + self.__memoryUSS = float() + self.__memoryRSS = float() + process = _fp._psutil.Process(_fp._os.getpid()) + self.__memoryUSS = process.memory_full_info().uss + self.__memoryRSS = process.memory_info().rss
+ + +
+[docs] + @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.") + def startMine(self): + """ + Starting the mining process + """ + self.mine()
+ + +
+[docs] + def getMemoryUSS(self) -> float: + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + + :rtype: float + """ + + return self.__memoryUSS
+ + +
+[docs] + def getMemoryRSS(self) -> float: + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self.__memoryRSS
+ + +
+[docs] + def getRuntime(self) -> float: + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + + :rtype: float + """ + + return self.__endTime - self.__startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self) -> _fp._pd.DataFrame: + """ + Storing final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + + :rtype: pd.DataFrame + """ + + dataframe = {} + data = [] + for a, b in self.__finalPatterns.items(): + data.append([a.replace('\t', ' '), b]) + dataframe = _fp._pd.DataFrame(data, columns=['Patterns', 'Support']) + return dataframe
+ + +
+[docs]
+    def save(self, outFile: str) -> None:
+        """
+        Complete set of frequent patterns will be written to an output file
+
+        :param outFile: name of the output file
+
+        :type outFile: csv file
+
+        :return: None
+        """
+        self._oFile = outFile
+        with open(self._oFile, 'w+') as writer:
+            for x, y in self.__finalPatterns.items():
+                s1 = x.strip() + ":" + str(y)
+                writer.write("%s \n" % s1)
+ + +
+[docs] + def getPatterns(self) -> Dict[str, int]: + """ + Function to send the set of frequent patterns after completion of the mining process + :return: returning frequent patterns + :rtype: dict + """ + return self.__finalPatterns
+ + +
+[docs]
+    def printResults(self) -> None:
+        """
+        This function is used to print the results
+        """
+        print("Total number of Frequent Patterns:", len(self.getPatterns()))
+        print("Total Memory in USS:", self.getMemoryUSS())
+        print("Total Memory in RSS:", self.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_fp._sys.argv) == 4 or len(_fp._sys.argv) == 5:
+        if len(_fp._sys.argv) == 5:
+            _ap = FPGrowth(_fp._sys.argv[1], _fp._sys.argv[3], _fp._sys.argv[4])
+        if len(_fp._sys.argv) == 4:
+            _ap = FPGrowth(_fp._sys.argv[1], _fp._sys.argv[3])
+        _ap.mine()
+        print("Total number of Frequent Patterns:", len(_ap.getPatterns()))
+        _ap.save(_fp._sys.argv[2])
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS:", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        print("Error! The number of input parameters does not match the total number of parameters provided")
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/frequentPattern/closed/CHARM.html b/sphinx/_build/html/_modules/PAMI/frequentPattern/closed/CHARM.html new file mode 100644 index 000000000..cb29bc21e --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/frequentPattern/closed/CHARM.html @@ -0,0 +1,692 @@ + + + + + + PAMI.frequentPattern.closed.CHARM — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for PAMI.frequentPattern.closed.CHARM

+# CHARM is an algorithm for discovering closed frequent patterns in a transactional database. Closed frequent patterns are frequent patterns for which no proper superset has the same support count. This algorithm employs a depth-first search technique to find the complete set of closed frequent patterns in a transactional database.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------------
+#
+#
+#             from PAMI.frequentPattern.closed import CHARM as alg
+#
+#             obj = alg.CHARM(iFile, minSup)
+#
+#             obj.mine()
+#
+#             frequentPatterns = obj.getPatterns()
+#
+#             print("Total number of Closed Frequent Patterns:", len(frequentPatterns))
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+
+from PAMI.frequentPattern.closed import abstract as _ab
+from deprecated import deprecated
+
+
+
+[docs]
+class CHARM(_ab._frequentPatterns):
+    """
+    :Description: CHARM is an algorithm for discovering closed frequent patterns in a transactional database. Closed frequent patterns are frequent patterns for which no proper superset has the same support count. This algorithm employs a depth-first search technique to find the complete set of closed frequent patterns in a transactional database.
+
+
+    :Reference: Mohammed J. Zaki and Ching-Jui Hsiao, CHARM: An Efficient Algorithm for Closed Itemset Mining,
+                Proceedings of the 2002 SIAM, SDM. 2002, 457-473, https://doi.org/10.1137/1.9781611972726.27
+
+    :param iFile: str :
+                  Name of the Input file to mine the complete set of frequent patterns
+    :param oFile: str :
+                  Name of the output file to store the complete set of frequent patterns
+    :param minSup: int or float or str :
+                   The user can specify minSup either in count or proportion of database size. If the program detects that the data type of minSup is integer, then it treats minSup as a count.
+    :param sep: str :
+                This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, users can override the default separator.
+
+
+
+    :Attributes:
+
+        startTime : float
+            To record the start time of the mining process
+
+        endTime : float
+            To record the completion time of the mining process
+
+        finalPatterns : dict
+            Storing the complete set of patterns in a dictionary variable
+
+        memoryUSS : float
+            To store the total amount of USS memory consumed by the program
+
+        memoryRSS : float
+            To store the total amount of RSS memory consumed by the program
+
+        Database : list
+            To store the transactions of a database in a list
+
+        mapSupport : dict
+            To maintain the information of items and their frequency
+
+        lno : int
+            it represents the total no of transactions
+
+        tree : class
+            it represents the Tree class
+
+        itemSetCount : int
+            it represents the total no of patterns
+
+        finalPatterns : dict
+            it represents to store the patterns
+
+        tidList : dict
+            stores the timestamps of an item
+
+        hashing : dict
+            stores the patterns with their support to check for the closed property
+
+
+    **Methods to execute code on terminal**
+    --------------------------------------------------------------
+
+    .. code-block:: console
+
+      Format:
+
+      (.venv) $ python3 CHARM.py <inputFile> <outputFile> <minSup>
+
+      Example Usage:
+
+      (.venv) $ python3 CHARM.py sampleDB.txt patterns.txt 10.0
+
+    .. note:: minSup will be considered in percentage of database transactions
+
+
+    **Importing this algorithm into a python program**
+    --------------------------------------------------------------
+    .. code-block:: python
+
+            from PAMI.frequentPattern.closed import CHARM as alg
+
+            obj = alg.CHARM(iFile, minSup)
+
+            obj.mine()
+
+            frequentPatterns = obj.getPatterns()
+
+            print("Total number of Closed Frequent Patterns:", len(frequentPatterns))
+
+            obj.save(oFile)
+
+            Df = obj.getPatternsAsDataFrame()
+
+            memUSS = obj.getMemoryUSS()
+
+            print("Total Memory in USS:", memUSS)
+
+            memRSS = obj.getMemoryRSS()
+
+            print("Total Memory in RSS", memRSS)
+
+            run = obj.getRuntime()
+
+            print("Total ExecutionTime in seconds:", run)
+
+
+    **Credits:**
+    -------------------------------
+
+    The complete program was written by P. Likhitha under the supervision of Professor Rage Uday Kiran.
+ + """ + + _startTime = float() + _endTime = float() + _minSup = float() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _sep = " " + _memoryUSS = float() + _memoryRSS = float() + _Database = [] + _tidList = {} + _lno = 0 + _mapSupport = {} + _hashing = {} + _itemSetCount = 0 + _maxItemId = 0 + _tableSize = 10000 + _writer = None + + def _convert(self, value): + """ + + To convert the type of user specified minSup value + + :param value: user specified minSup value + + :type value: int or float or str + + :return: converted type + + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (self._lno * value) + if type(value) is str: + if '.' in value: + value = float(value) + value = (self._lno * value) + else: + value = int(value) + return value + + def _creatingItemsets(self): + """ + Storing the complete frequent patterns of the database/input file in a database variable + """ + self._mapSupport = {} + self._tidList = {} + self._lno = 0 + if isinstance(self._iFile, _ab._pd.DataFrame): + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'Transactions' in i: + self._Database = self._iFile['Transactions'].tolist() + for i in self._Database: + self._lno += 1 + for j in i: + if j not in self._mapSupport: + self._mapSupport[j] = 1 + self._tidList[j] = [self._lno] + else: + self._mapSupport[j] += 1 + self._tidList[j].append(self._lno) + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line.strip() + self._lno += 1 + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + for j in temp: + if j not in self._mapSupport: + self._mapSupport[j] = 1 + self._tidList[j] = [self._lno] + else: + self._mapSupport[j] += 1 + self._tidList[j].append(self._lno) + else: + try: + with open(self._iFile, 'r') as f: + for line in f: + i = [i.rstrip() for i in line.split(self._sep)] + i = [x for x in i if x] + self._lno += 1 + for j in i: + if j not in self._mapSupport: + self._mapSupport[j] = 1 + self._tidList[j] = [self._lno] + else: + self._mapSupport[j] += 1 + self._tidList[j].append(self._lno) + except IOError: + print("File Not Found") + self._minSup = self._convert(self._minSup) + self._mapSupport = {k: v for k, v in self._mapSupport.items() if v >= self._minSup} + _flist = {} + self._tidList = {k: v for k, v in self._tidList.items() if k in self._mapSupport} + for x, y in self._tidList.items(): + t1 = 0 + for i in y: + t1 += i + _flist[x] = t1 + _flist = [key for key, value in sorted(_flist.items(), key=lambda x: x[1])] + return _flist + + def _calculate(self, tidSet): + """ + To calculate the hashcode of pattern + + :param tidSet: the timestamps of a pattern + + :type tidSet: list + + :rtype: int + """ + + hashcode = 0 + for i in tidSet: + hashcode += i + if hashcode < 0: + hashcode = abs(0 - hashcode) + return hashcode % self._tableSize + + def _contains(self, itemSet, value, hashcode): + """ + Check for the closed property(patterns with same support) by checking the hashcode(sum of timestamps), + if hashcode key in hashing dict is none then returns a false, else returns with true. 
+        :param itemSet: frequent pattern
+        :type itemSet: list
+        :param value: support of the pattern
+        :type value: int
+        :param hashcode: calculated from the timestamps of the pattern
+        :type hashcode: int
+        """
+        if self._hashing.get(hashcode) is None:
+            return False
+        for i in self._hashing[hashcode]:
+            itemSetx = i
+            if value == self._hashing[hashcode][itemSetx] and set(itemSetx).issuperset(itemSet):
+                return True
+        return False
+
+    def _save(self, prefix, suffix, tidSetx):
+        """
+        Check for the closed property (patterns with the same support); if found, deletes the subsets and stores
+        supersets, and also saves the patterns that satisfy the closed property
+
+        :param prefix: the prefix of a pattern
+
+        :type prefix: frequent item or pattern
+
+        :param suffix: the suffix of a pattern
+
+        :type suffix: list
+
+        :param tidSetx: the timestamps of a pattern
+
+        :type tidSetx: list
+        """
+        if prefix is None:
+            prefix = suffix
+        else:
+            prefix = prefix + suffix
+        prefix = list(set(prefix))
+        prefix.sort()
+        val = len(tidSetx)
+        if val >= self._minSup:
+            hashcode = self._calculate(tidSetx)
+            if self._contains(prefix, val, hashcode) is False:
+                sample = str()
+                for i in prefix:
+                    sample = sample + i + "\t"
+                self._itemSetCount += 1
+                self._finalPatterns[sample] = val
+                if hashcode not in self._hashing:
+                    self._hashing[hashcode] = {tuple(prefix): val}
+                else:
+                    self._hashing[hashcode][tuple(prefix)] = val
+
+    def _processEquivalenceClass(self, prefix, itemSets, tidSets):
+        """
+        The equivalence class is followed, and patterns that satisfy the frequent properties are checked.
+        :param prefix: main equivalence prefix
+        :type prefix: frequent item or pattern
+        :param itemSets: patterns which are items combined with the prefix and satisfying minSup
+        :type itemSets: list
+        :param tidSets: timestamps of the items in the argument itemSets
+        :type tidSets: list
+        """
+        if len(itemSets) == 1:
+            i = itemSets[0]
+            tidI = tidSets[0]
+            self._save(prefix, [i], tidI)
+            return
+        if len(itemSets) == 2:
+            itemX = itemSets[0]
+            tidSetX = tidSets[0]
+            itemY = itemSets[1]
+            tidSetY = tidSets[1]
+            y1 = list(set(tidSetX).intersection(tidSetY))
+            if len(y1) >= self._minSup:
+                suffix = []
+                suffix += [itemX, itemY]
+                suffix = list(set(suffix))
+                self._save(prefix, suffix, y1)
+            if len(y1) != len(tidSetX):
+                self._save(prefix, [itemX], tidSetX)
+            if len(y1) != len(tidSetY):
+                self._save(prefix, [itemY], tidSetY)
+            return
+        for i in range(len(itemSets)):
+            itemX = itemSets[i]
+            if itemX is None:
+                continue
+            tidSetX = tidSets[i]
+            classItemSets = []
+            classTidSets = []
+            itemSetx = [itemX]
+            for j in range(i + 1, len(itemSets)):
+                itemY = itemSets[j]
+                if itemY is None:
+                    continue
+                tidSetY = tidSets[j]
+                y = list(set(tidSetX).intersection(tidSetY))
+                if len(y) < self._minSup:
+                    continue
+                if len(tidSetX) == len(tidSetY) and len(y) == len(tidSetX):
+                    itemSets[j] = None
+                    tidSets[j] = None
+                    itemSetx.append(itemY)
+                elif len(tidSetX) < len(tidSetY) and len(y) == len(tidSetX):
+                    itemSetx.append(itemY)
+                elif len(tidSetX) > len(tidSetY) and len(y) == len(tidSetY):
+                    itemSets[j] = None
+                    tidSets[j] = None
+                    classItemSets.append(itemY)
+                    classTidSets.append(y)
+                else:
+                    classItemSets.append(itemY)
+                    classTidSets.append(y)
+            if len(classItemSets) > 0:
+                newPrefix = list(set(itemSetx)) + prefix
+                self._processEquivalenceClass(newPrefix, classItemSets, classTidSets)
+            self._save(prefix, list(set(itemSetx)), tidSetX)
+
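+    # Worked toy example of the closed-pattern test above (illustration only):
+    # a pattern's hashcode is the sum of its tids modulo _tableSize, so a
+    # candidate with tid list [2, 4, 7] hashes to (2 + 4 + 7) % 10000 = 13.
+    # If bucket 13 already holds a superset such as ('a', 'b', 'c') with the
+    # same support 3, _contains returns True and the candidate ('a', 'b') is
+    # discarded as non-closed; otherwise the candidate is stored in the bucket.
+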
+[docs] + @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.") + def startMine(self): + """ + Mining process will start from here by extracting the frequent patterns from the database. It performs prefix + equivalence to generate the combinations and closed frequent patterns. + """ + self.mine()
+ + +
+[docs]
+    def mine(self):
+        """
+        Mining process will start from here by extracting the frequent patterns from the database. It performs prefix
+        equivalence to generate the combinations and closed frequent patterns.
+        """
+        self._startTime = _ab._time.time()
+        _plist = self._creatingItemsets()
+        self._finalPatterns = {}
+        self._hashing = {}
+        for i in range(len(_plist)):
+            itemX = _plist[i]
+            if itemX is None:
+                continue
+            tidSetx = self._tidList[itemX]
+            itemSetx = [itemX]
+            itemSets = []
+            tidSets = []
+            for j in range(i + 1, len(_plist)):
+                itemY = _plist[j]
+                if itemY is None:
+                    continue
+                tidSetY = self._tidList[itemY]
+                y1 = list(set(tidSetx).intersection(tidSetY))
+                if len(y1) < self._minSup:
+                    continue
+                if len(tidSetx) == len(tidSetY) and len(y1) == len(tidSetx):
+                    _plist[j] = None
+                    itemSetx.append(itemY)
+                elif len(tidSetx) < len(tidSetY) and len(y1) == len(tidSetx):
+                    itemSetx.append(itemY)
+                elif len(tidSetx) > len(tidSetY) and len(y1) == len(tidSetY):
+                    _plist[j] = None
+                    itemSets.append(itemY)
+                    tidSets.append(y1)
+                else:
+                    itemSets.append(itemY)
+                    tidSets.append(y1)
+            if len(itemSets) > 0:
+                self._processEquivalenceClass(itemSetx, itemSets, tidSets)
+            self._save(None, itemSetx, tidSetx)
+        print("Closed Frequent patterns were generated successfully using the CHARM algorithm")
+        self._endTime = _ab._time.time()
+        _process = _ab._psutil.Process(_ab._os.getpid())
+        self._memoryUSS = float()
+        self._memoryRSS = float()
+        self._memoryUSS = _process.memory_full_info().uss
+        self._memoryRSS = _process.memory_info().rss
+ + +
+[docs] + def getMemoryUSS(self): + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self): + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self): + """ + Calculating the total amount of runtime taken by the mining process + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self): + """ + Storing final frequent patterns in a dataframe + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataframe = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a.replace('\t', ' '), b]) + dataframe = _ab._pd.DataFrame(data, columns=['Patterns', 'Support']) + return dataframe
+ + +
+[docs]
+    def save(self, outFile):
+        """
+        Complete set of frequent patterns will be written to an output file
+        :param outFile: name of the output file
+        :type outFile: csv file
+        """
+        self._oFile = outFile
+        with open(self._oFile, 'w+') as writer:
+            for x, y in self._finalPatterns.items():
+                s1 = x.strip() + ":" + str(y)
+                writer.write("%s \n" % s1)
+ + +
+[docs] + def getPatterns(self): + """ + Function to send the set of frequent patterns after completion of the mining process + :return: returning frequent patterns + :rtype: dict + """ + + return self._finalPatterns
+ + +
+[docs]
+    def printResults(self):
+        """
+        This function is used to print the results
+        """
+        print("Total number of Closed Frequent Patterns:", len(self.getPatterns()))
+        print("Total Memory in USS:", self.getMemoryUSS())
+        print("Total Memory in RSS:", self.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_ab._sys.argv) == 4 or len(_ab._sys.argv) == 5:
+        if len(_ab._sys.argv) == 5:
+            _ap = CHARM(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4])
+        if len(_ab._sys.argv) == 4:
+            _ap = CHARM(_ab._sys.argv[1], _ab._sys.argv[3])
+        _ap.mine()
+        print("Total number of Closed Frequent Patterns:", len(_ap.getPatterns()))
+        _ap.save(_ab._sys.argv[2])
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS:", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        print("Error! The number of input parameters does not match the total number of parameters provided")
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/frequentPattern/maximal/MaxFPGrowth.html b/sphinx/_build/html/_modules/PAMI/frequentPattern/maximal/MaxFPGrowth.html new file mode 100644 index 000000000..aa4bd4837 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/frequentPattern/maximal/MaxFPGrowth.html @@ -0,0 +1,909 @@ + + + + + + PAMI.frequentPattern.maximal.MaxFPGrowth — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for PAMI.frequentPattern.maximal.MaxFPGrowth

+# MaxFP-Growth is one of the fundamental algorithms for discovering maximal frequent patterns in a transactional database.
+#
+# **Importing this algorithm into a python program**
+# ---------------------------------------------------------
+#
+#             from PAMI.frequentPattern.maximal import MaxFPGrowth as alg
+#
+#             obj = alg.MaxFPGrowth("../basic/sampleTDB.txt", "2")
+#
+#             obj.mine()
+#
+#             frequentPatterns = obj.getPatterns()
+#
+#             print("Total number of Frequent Patterns:", len(frequentPatterns))
+#
+#             obj.save("patterns")
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+
+from PAMI.frequentPattern.maximal import abstract as _ab
+from deprecated import deprecated
+
+
+_minSup = str()
+global maximalTree
+
+
+class _Node(object):
+    """
+    A class used to represent the node of frequentPatternTree
+
+    :Attributes:
+        item : int
+            storing item of a node
+        counter : list
+            To maintain the support of the node
+        parent : node
+            To maintain the parent of every node
+        children : list
+            To maintain the children of node
+
+    :Methods:
+        addChild(itemName)
+            storing the children to their respective parent nodes
+    """
+
+    def __init__(self, item, children):
+        """
+        Initializing the Node class
+        :param item: Storing the item of a node
+        :type item: int or None
+        :param children: To maintain the children of a node
+        :type children: dict
+        """
+        self.item = item
+        self.children = children
+        self.counter = int()
+        self.parent = None
+
+    def addChild(self, node):
+        """
+        Adding a child to the created nodes
+        :param node: node object
+        :type node: Node
+        """
+        self.children[node.item] = node
+        node.parent = self
+
+
+class _Tree(object):
+    """
+    A class used to represent the frequentPatternGrowth tree structure
+
+    :Attributes:
+        root : Node
+               Represents the root node of the tree
+        summaries : dictionary
+                storing the nodes with same item name
+        info : dictionary
+                stores the support of items
+
+
+    :Methods:
+
+        addTransaction(transaction)
+            creating transaction as a branch in frequentPatternTree
+        addConditionalTransaction(prefixPaths, supportOfItems)
+            construct the conditional tree for prefix paths
+        condPatterns(Node)
+            generates the conditional patterns from tree for specific node
+        conditionalTransaction(prefixPaths,Support)
+            takes the prefix path of a node and the support at the child of the path, extracts the frequent
+            items from the prefix paths, and generates prefix paths containing only frequent items
+        remove(Node)
+            removes the node from tree once after generating all the patterns respective to the node
+        generatePatterns(Node)
+            starts from the root node of the tree and mines the frequent patterns
+    """
+
+    def __init__(self):
+        self.root = _Node(None, {})
+        self.summaries = {}
+        self.info = {}
+        #self.maximalTree = _MPTree()
+
+    def addTransaction(self, transaction):
+        """
+
+        Adding transactions into tree
+
+        :param transaction: represents the transaction in a database
+
+        :type transaction: list
+
+        :return: tree
+
+        """
+        currentNode = self.root
+        for i in range(len(transaction)):
+            if transaction[i] not in currentNode.children:
+                newNode = _Node(transaction[i], {})
+                newNode.counter = 1
+                currentNode.addChild(newNode)
+                if transaction[i] in self.summaries:
+                    self.summaries[transaction[i]].append(newNode)
+                else:
+                    self.summaries[transaction[i]] = [newNode]
+                currentNode = newNode
+            else:
+                currentNode = currentNode.children[transaction[i]]
+                currentNode.counter += 1
+
+    def addConditionalTransaction(self, transaction, count):
+        """
+        Adding a conditional (prefix-path) transaction into the conditional tree
+
+        :param transaction: conditional transaction of a node
+        :type transaction: list
+        :param count: the support of the conditional transaction
+        :type count: int
+        :return: None; the transaction is inserted with its support count
+        """
+        currentNode = self.root
+        for i in range(len(transaction)):
+            if transaction[i] not in currentNode.children:
+                newNode = _Node(transaction[i], {})
+                newNode.counter = count
+                currentNode.addChild(newNode)
+                if transaction[i] in self.summaries:
+                    self.summaries[transaction[i]].append(newNode)
+                else:
+                    self.summaries[transaction[i]] = [newNode]
+                currentNode = newNode
+            else:
+                currentNode = currentNode.children[transaction[i]]
+                currentNode.counter += count
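+    # Sketch of the difference from addTransaction (illustrative): a prefix
+    # path is inserted once with its full support instead of once per
+    # occurrence, so counters grow by count rather than by 1.
+    #
+    #     ct = _Tree()
+    #     ct.addConditionalTransaction([1, 2], 3)
+    #     assert ct.root.children[1].counter == 3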
+
+    def getConditionalPatterns(self, alpha):
+        """
+        Generates all the conditional patterns of the given item
+        :param alpha: the item whose prefix paths are collected from the tree
+        :type alpha: int
+        :return: conditional patterns of the item, their supports, and the surviving item supports
+        """
+        finalPatterns = []
+        finalSets = []
+        for i in self.summaries[alpha]:
+            set1 = i.counter
+            set2 = []
+            while i.parent.item is not None:
+                set2.append(i.parent.item)
+                i = i.parent
+            if len(set2) > 0:
+                set2.reverse()
+                finalPatterns.append(set2)
+                finalSets.append(set1)
+        finalPatterns, finalSets, info = self.conditionalTransactions(finalPatterns, finalSets)
+        return finalPatterns, finalSets, info
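+    # Worked example (illustrative): if item 3 occurs on two branches,
+    # root -> 1 -> 2 -> 3 (counter 2) and root -> 1 -> 3 (counter 1), the loop
+    # above walks each node's parents and yields the prefix paths [1, 2] with
+    # support 2 and [1] with support 1, before pruning them against minSup.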
+
+    def conditionalTransactions(self, condPatterns, condFreq):
+        """
+        sorting and removing the items from conditional transactions which don't satisfy minSup
+        :param condPatterns: conditional patterns of a node
+        :type condPatterns: list
+        :param condFreq: supports of the conditional patterns, one per pattern
+        :type condFreq: list
+        :return: pruned conditional patterns, their supports, and the surviving item supports
+        """
+        global _minSup
+        pat = []
+        tids = []
+        data1 = {}
+        for i in range(len(condPatterns)):
+            for j in condPatterns[i]:
+                if j not in data1:
+                    data1[j] = condFreq[i]
+                else:
+                    data1[j] += condFreq[i]
+        updatedDict = {}
+        updatedDict = {k: v for k, v in data1.items() if v >= _minSup}
+        count = 0
+        for p in condPatterns:
+            p1 = [v for v in p if v in updatedDict]
+            trans = sorted(p1, key=lambda x: (updatedDict.get(x), -x), reverse=True)
+            if len(trans) > 0:
+                pat.append(trans)
+                tids.append(condFreq[count])
+            count += 1
+        return pat, tids, updatedDict
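+    # Worked example (illustrative): with _minSup = 2, condPatterns
+    # [[1, 2], [1, 3]] and condFreq [2, 1] give the item supports
+    # {1: 3, 2: 2, 3: 1}; item 3 is dropped, so the pruned paths are
+    # [[1, 2], [1]] with supports [2, 1] and updatedDict == {1: 3, 2: 2}.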
+
+    def removeNode(self, nodeValue):
+        """
+        To remove the node from the original tree
+        :param nodeValue: leaf node of tree
+        :type nodeValue: int
+        :return: None; every node of the item is unlinked from its parent
+        """
+        for i in self.summaries[nodeValue]:
+            del i.parent.children[nodeValue]
+            i = None
+
+    def generatePatterns(self, prefix, patterns, maximalTree):
+        """
+        Generates the patterns
+        :param prefix: the current prefix of items being extended
+        :type prefix: list
+        :param patterns: dictionary collecting the generated maximal patterns
+        :type patterns: dict
+        :param maximalTree: the maximal tree used to prune subsumed candidates
+        :type maximalTree: _MPTree
+        :return: None; maximal frequent patterns are stored in the patterns dictionary
+        """
+        for i in sorted(self.summaries, key=lambda x: (self.info.get(x), -x)):
+            pattern = prefix[:]
+            pattern.append(i)
+            condPatterns, tids, info = self.getConditionalPatterns(i)
+            conditional_tree = _Tree()
+            conditional_tree.info = info.copy()
+            head = pattern[:]
+            tail = []
+            for la in info:
+                tail.append(la)
+            sub = head + tail
+            if maximalTree.checkerSub(sub) == 1:
+                for pat in range(len(condPatterns)):
+                    conditional_tree.addConditionalTransaction(condPatterns[pat], tids[pat])
+                if len(condPatterns) >= 1:
+                    conditional_tree.generatePatterns(pattern, patterns, maximalTree)
+                else:
+                    maximalTree.addTransaction(pattern)
+                    patterns[tuple(pattern)] = self.info[i]
+            self.removeNode(i)
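+    # Sketch of the control flow (illustrative): items are visited in
+    # ascending order of support; for each item the candidate head + tail
+    # itemset is tested against the maximal tree first, so an entire
+    # conditional tree is skipped whenever a superset of the candidate has
+    # already been recorded as maximal (checkerSub returns 0).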
+
+
+class _MNode(object):
+    """
+    A class used to represent the node in maximal tree
+
+    :Attributes:
+        item : int
+            storing item of a node
+        children : dict
+            To maintain the children of a node
+
+    :Methods:
+        addChild(itemName)
+            storing the children to their respective parent nodes
+    """
+
+    def __init__(self, item, children):
+        self.item = item
+        self.children = children
+
+    def addChild(self, node):
+        """
+        To add the children details to a parent node
+        :param node: children node
+        :type node: _MNode
+        :return: adding children details to parent node
+        """
+        self.children[node.item] = node
+        node.parent = self
+
+
+class _MPTree(object):
+    """
+    A class used to represent the maximal frequent pattern tree structure
+
+    :Attributes:
+
+        root : Node
+            Represents the root node of the tree
+        summaries : dictionary
+            storing the nodes with same item name
+
+
+    :Methods:
+
+        addTransaction(transaction)
+            inserts a maximal frequent pattern as a branch of the maximal tree
+        checkerSub(items)
+            checks whether the given items form a subset of a pattern already stored in the tree
+    """
+
+    def __init__(self):
+        self.root = _MNode(None, {})
+        self.summaries = {}
+
+    def addTransaction(self, transaction):
+        """
+        To insert a maximal frequent pattern into the maximal tree
+        :param transaction: the maximal frequent pattern extracted till now
+        :type transaction: list
+        :return: None; the pattern is stored as a branch of the maximal tree
+        """
+        currentNode = self.root
+        transaction.sort()
+        for i in range(len(transaction)):
+            if transaction[i] not in currentNode.children:
+                newNode = _MNode(transaction[i], {})
+                currentNode.addChild(newNode)
+                if transaction[i] in self.summaries:
+                    self.summaries[transaction[i]].insert(0, newNode)
+                else:
+                    self.summaries[transaction[i]] = [newNode]
+                currentNode = newNode
+            else:
+                currentNode = currentNode.children[transaction[i]]
+
+    def checkerSub(self, items):
+        """
+        To check whether a subset of the given pattern is already present in the tree
+        :param items: the candidate pattern to test
+        :type items: list
+        :return: 0 if the items are a subset of a pattern already in the tree, 1 otherwise
+        """
+        items.sort(reverse=True)
+        item = items[0]
+        if item not in self.summaries:
+            return 1
+        else:
+            if len(items) == 1:
+                return 0
+        for t in self.summaries[item]:
+            cur = t.parent
+            i = 1
+            while cur.item is not None:
+                if items[i] == cur.item:
+                    i += 1
+                    if i == len(items):
+                        return 0
+                cur = cur.parent
+        return 1
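+    # Worked example (illustrative): after addTransaction([2, 3, 5]) the tree
+    # stores the branch 2 -> 3 -> 5, so checkerSub([3, 5]) returns 0 (a
+    # superset is already present), while checkerSub([4]) returns 1.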
+
+
+# Initialising the variable for the maximal tree
+#maximalTree = _MPTree()
+
+
+
+[docs] +class MaxFPGrowth(_ab._frequentPatterns): + """ + :Description: MaxFP-Growth is one of the fundamental algorithms to discover maximal frequent patterns in a transactional database. + + :Reference: Grahne, G. and Zhu, J., "High Performance Mining of Maximal Frequent itemSets", + http://users.encs.concordia.ca/~grahne/papers/hpdm03.pdf + + :param iFile: str : + Name of the Input file to mine complete set of frequent patterns + :param oFile: str : + Name of the output file to store complete set of frequent patterns + :param minSup: int or float or str : + The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup as expressed in count. + :param maxPer: float : + The user can specify maxPer in count or proportion of database size. If the program detects the data type of maxPer is integer, then it treats maxPer as expressed in count. + + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator. + + + + :Attributes: + + startTime : float + To record the start time of the mining process + + endTime : float + To record the completion time of the mining process + + finalPatterns : dict + Storing the complete set of patterns in a dictionary variable + + memoryUSS : float + To store the total amount of USS memory consumed by the program + + memoryRSS : float + To store the total amount of RSS memory consumed by the program + + Database : list + To store the transactions of a database in list + + mapSupport : Dictionary + To maintain the information of items and their frequency + lno : int + it represents the total no of transactions + tree : class + it represents the Tree class + itemSetCount : int + it represents the total no of patterns + finalPatterns : dict + it is used to store the patterns + + + **Methods to execute code on terminal** + --------------------------------------------------------- + + .. code-block:: console + + Format: + + (.venv) $ python3 MaxFPGrowth.py <inputFile> <outputFile> <minSup> + + Example Usage: + + (.venv) $ python3 MaxFPGrowth.py sampleDB.txt patterns.txt 0.3 + + .. note:: minSup will be considered in percentage of database transactions + + + + **Importing this algorithm into a python program** + --------------------------------------------------------- + + .. code-block:: python + + from PAMI.frequentPattern.maximal import MaxFPGrowth as alg + + obj = alg.MaxFPGrowth("../basic/sampleTDB.txt", "2") + + obj.mine() + + frequentPatterns = obj.getPatterns() + + print("Total number of Frequent Patterns:", len(frequentPatterns)) + + obj.save("patterns") + + Df = obj.getPatternsAsDataFrame() + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + + **Credits:** + ------------------- + The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran. 
+ + """ + _startTime = float() + _endTime = float() + _minSup = str() + _maxPer = float() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _sep = " " + _memoryUSS = float() + _memoryRSS = float() + _Database = [] + _rank = {} + _rankdup = {} + _lno = 0 + _maximalTree = str() + + def _creatingItemSets(self): + """ + Storing the complete transactions of the database/input file in a database variable + """ + self._Database = [] + if isinstance(self._iFile, _ab._pd.DataFrame): + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'Transactions' in i: + self._Database = self._iFile['Transactions'].tolist() + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line.strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + #print(line) + self._Database.append(temp) + except IOError: + print("File Not Found") + quit() + + def _frequentOneItem(self): + """ + To extract the one-length frequent itemSets + :return: 1-length frequent items + """ + _mapSupport = {} + k = 0 + for tr in self._Database: + k += 1 + for i in range(0, len(tr)): + if tr[i] not in _mapSupport: + _mapSupport[tr[i]] = 1 + else: + _mapSupport[tr[i]] += 1 + _mapSupport = {k: v for k, v in _mapSupport.items() if v >= self._minSup} + #print(len(mapSupport), self.minSup) + genList = [k for k, v in sorted(_mapSupport.items(), key=lambda x: x[1], reverse=True)] + self._rank = dict([(index, item) for (item, index) in enumerate(genList)]) + return _mapSupport, genList + + def _updateTransactions(self, oneLength): + """ + To sort the transactions in their support descending order and allocating ranks respectively + :param oneLength: 1-length frequent items in dictionary + :type oneLength: dict + :return: returning the sorted list + :Example: oneLength = {'a':7, 'b': 5, 'c':'4', 'd':3} + rank = {'a':0, 'b':1, 'c':2, 'd':3} + """ + list1 = [] + for tr in self._Database: + list2 = [] + for i in range(0, len(tr)): + if tr[i] in oneLength: + list2.append(self._rank[tr[i]]) + if len(list2) >= 2: + list2.sort() + list1.append(list2) + return list1 + + + def _buildTree(self, data, info): + """ + creating the root node as null in fp-tree and adding all transactions into tree. + :param data: updated transactions + :type data: dict + :param info: rank of items in transactions + :type info: dict + :return: fp-tree + """ + rootNode = _Tree() + rootNode.info = info.copy() + for i in range(len(data)): + rootNode.addTransaction(data[i]) + return rootNode + + + def _convert(self, value): + """ + To convert the type of user specified minSup value + :param value: user specified minSup value + :type value: int or float or str + :return: converted type + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self._Database) * value) + if type(value) is str: + if '.' 
in value: + value = float(value) + value = ((len(self._Database)) * value) + else: + value = int(value) + return value + + def _convertItems(self, itemSet): + """ + To convert the item ranks into their original item names + :param itemSet: itemSet or a pattern + :type itemSet: list + :return: original pattern + """ + t1 = [] + for i in itemSet: + t1.append(self._rankdup[i]) + return t1 + +
+[docs] + @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.") + def startMine(self): + """ + Mining process will start from this function + """ + + self.mine()
+ + +
+[docs] + def mine(self): + """ + Mining process will start from this function + """ + + global _minSup + self._startTime = _ab._time.time() + if self._iFile is None: + raise Exception("Please enter the file path or file name:") + if self._minSup is None: + raise Exception("Please enter the Minimum Support") + self._creatingItemSets() + self._minSup = self._convert(self._minSup) + _minSup = self._minSup + generatedItems, pfList = self._frequentOneItem() + updatedTransactions = self._updateTransactions(generatedItems) + for x, y in self._rank.items(): + self._rankdup[y] = x + info = {self._rank[k]: v for k, v in generatedItems.items()} + patterns = {} + self._finalPatterns = {} + self._maximalTree = _MPTree() + Tree = self._buildTree(updatedTransactions, info) + Tree.generatePatterns([], patterns, self._maximalTree) + for x, y in patterns.items(): + pattern = str() + x = self._convertItems(x) + for i in x: + pattern = pattern + i + "\t" + self._finalPatterns[pattern] = y + self._endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = float() + self._memoryRSS = float() + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss + print("Maximal Frequent patterns were generated successfully using MaxFp-Growth algorithm ")
+ + +
+[docs] + def getMemoryUSS(self): + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self): + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self): + """ + Calculating the total amount of runtime taken by the mining process + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self): + """ + Storing final frequent patterns in a dataframe + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataFrame = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a.replace('\t', ' '), b]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support']) + return dataFrame
+ + +
+[docs] + def save(self, outFile): + """ + Complete set of frequent patterns will be written into an output file + :param outFile: name of the output file + :type outFile: csv file + """ + self._oFile = outFile + writer = open(self._oFile, 'w+') + for x, y in self._finalPatterns.items(): + s1 = x.strip() + ":" + str(y) + writer.write("%s \n" % s1)
+ + +
+[docs] + def getPatterns(self): + """ + Function to send the set of frequent patterns after completion of the mining process + :return: returning frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs] + def printResults(self): + """ + This function is used to print the results + """ + print('Total number of Maximal Frequent Patterns: ' + str(len(self.getPatterns()))) + print('Runtime: ' + str(self.getRuntime())) + print('Memory (RSS): ' + str(self.getMemoryRSS())) + print('Memory (USS): ' + str(self.getMemoryUSS()))
+
+ + + +if __name__ == "__main__": + _ap = str() + if len(_ab._sys.argv) == 4 or len(_ab._sys.argv) == 5: + if len(_ab._sys.argv) == 5: + _ap = MaxFPGrowth(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4]) + if len(_ab._sys.argv) == 4: + _ap = MaxFPGrowth(_ab._sys.argv[1], _ab._sys.argv[3]) + _ap.mine() + _ap.save(_ab._sys.argv[2]) + print("Total number of Maximal Frequent Patterns:", len(_ap.getPatterns())) + print("Total Memory in USS:", _ap.getMemoryUSS()) + print("Total Memory in RSS", _ap.getMemoryRSS()) + print("Total ExecutionTime in seconds:", _ap.getRuntime()) + else: + print("Error! The number of input parameters does not match the total number of parameters provided") +
+
\ No newline at end of file
diff --git a/sphinx/_build/html/_modules/PAMI/frequentPattern/topk/FAE.html b/sphinx/_build/html/_modules/PAMI/frequentPattern/topk/FAE.html
new file mode 100644
index 000000000..1bc6e3520
--- /dev/null
+++ b/sphinx/_build/html/_modules/PAMI/frequentPattern/topk/FAE.html
@@ -0,0 +1,591 @@
+PAMI.frequentPattern.topk.FAE — PAMI 2024.04.23 documentation

Source code for PAMI.frequentPattern.topk.FAE

+# Top-K is an algorithm to discover the top frequent patterns in a transactional database.
+#
+# **Importing this algorithm into a python program**
+# ---------------------------------------------------------
+#
+#             import PAMI.frequentPattern.topk.FAE as alg
+#
+#             obj = alg.FAE(iFile, K)
+#
+#             obj.mine()
+#
+#             topKFrequentPatterns = obj.getPatterns()
+#
+#             print("Total number of Frequent Patterns:", len(topKFrequentPatterns))
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+from PAMI.frequentPattern.topk import abstract as _ab
+from deprecated import deprecated
+
+
+
+[docs] +class FAE(_ab._frequentPatterns): + """ + :Description: Top-K is an algorithm to discover the top frequent patterns in a transactional database. + + + :Reference: Zhi-Hong Deng, Guo-Dong Fang: Mining Top-Rank-K Frequent Patterns: DOI: 10.1109/ICMLC.2007.4370261 · Source: IEEE Xplore + https://ieeexplore.ieee.org/document/4370261 + + :param iFile: str : + Name of the Input file to mine complete set of frequent patterns + :param oFile: str : + Name of the output file to store complete set of frequent patterns + :param k: int : + User specified count of top frequent patterns + :param minimum: int : + Minimum number of frequent patterns to consider in analysis + + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator. + + + + :Attributes: + + startTime : float + To record the start time of the mining process + + endTime : float + To record the completion time of the mining process + + finalPatterns : dict + Storing the complete set of patterns in a dictionary variable + + memoryUSS : float + To store the total amount of USS memory consumed by the program + + memoryRSS : float + To store the total amount of RSS memory consumed by the program + + + **Methods to execute code on terminal** + ------------------------------------------- + + .. code-block:: console + + Format: + + (.venv) $ python3 FAE.py <inputFile> <outputFile> <K> + + Example Usage: + + (.venv) $ python3 FAE.py sampleDB.txt patterns.txt 10 + + .. note:: k will be considered as count of top frequent patterns to consider in analysis + + + + **Importing this algorithm into a python program** + --------------------------------------------------------- + .. code-block:: python + + import PAMI.frequentPattern.topk.FAE as alg + + obj = alg.FAE(iFile, K) + + obj.mine() + + topKFrequentPatterns = obj.getPatterns() + + print("Total number of Frequent Patterns:", len(topKFrequentPatterns)) + + obj.save(oFile) + + Df = obj.getPatternsAsDataFrame() + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + Credits: + -------- + The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran. 
+ + """ + + _startTime = float() + _endTime = float() + _k = int() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _sep = " " + _memoryUSS = float() + _memoryRSS = float() + _Database = [] + _tidList = {} + _minimum = int() + + def _creatingItemSets(self): + """ + Storing the complete transactions of the database/input file in a database variable + + """ + + self._Database = [] + if isinstance(self._iFile, _ab._pd.DataFrame): + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'Transactions' in i: + self._Database = self._iFile['Transactions'].tolist() + + # print(self.Database) + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line.strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + except IOError: + print("File Not Found") + quit() + + def _frequentOneItem(self): + """ + Generating one frequent patterns + """ + candidate = {} + self._tidList = {} + for i in range(len(self._Database)): + for j in self._Database[i]: + if j not in candidate: + candidate[j] = 1 + self._tidList[j] = [i] + else: + candidate[j] += 1 + self._tidList[j].append(i) + self._finalPatterns = {} + plist = [key for key, value in sorted(candidate.items(), key=lambda x: x[1], reverse=True)] + for i in plist: + if len(self._finalPatterns) >= self._k: + break + else: + self._finalPatterns[i] = candidate[i] + self._minimum = min([self._finalPatterns[i] for i in self._finalPatterns.keys()]) + plist = list(self._finalPatterns.keys()) + return plist + + def _save(self, prefix, suffix, tidSetI): + """Saves the patterns that satisfy the periodic frequent property. + + :param prefix: the prefix of a pattern + :type prefix: list + :param suffix: the suffix of a patterns + :type suffix: list + :param tidSetI: the timestamp of a patterns + :type tidSetI: list + """ + + if prefix is None: + prefix = suffix + else: + prefix = prefix + suffix + val = len(tidSetI) + sample = str() + for i in prefix: + sample = sample + i + "\t" + if len(self._finalPatterns) < self._k: + if val > self._minimum: + self._finalPatterns[sample] = val + self._finalPatterns = {k: v for k, v in sorted(self._finalPatterns.items(), key=lambda item: item[1], reverse=True)} + self._minimum = min([i for i in self._finalPatterns.values()]) + else: + for x, y in sorted(self._finalPatterns.items(), key=lambda x: x[1]): + if val > y: + del self._finalPatterns[x] + self._finalPatterns[sample] = val + self._finalPatterns = {k: v for k, v in + sorted(self._finalPatterns.items(), key=lambda item: item[1], + reverse=True)} + self._minimum = min([i for i in self._finalPatterns.values()]) + return + + def _Generation(self, prefix, itemSets, tidSets): + """Equivalence class is followed and checks for the patterns generated for periodic-frequent patterns. 
+ + :param prefix: main equivalence prefix + :type prefix: periodic-frequent item or pattern + :param itemSets: patterns which are items combined with prefix and satisfying the periodicity + and frequent with their timestamps + :type itemSets: list + :param tidSets: timestamps of the items in the argument itemSets + :type tidSets: list + + + """ + if len(itemSets) == 1: + i = itemSets[0] + tidI = tidSets[0] + self._save(prefix, [i], tidI) + return + for i in range(len(itemSets)): + itemI = itemSets[i] + if itemI is None: + continue + tidSetI = tidSets[i] + classItemSets = [] + classTidSets = [] + itemSetX = [itemI] + for j in range(i + 1, len(itemSets)): + itemJ = itemSets[j] + tidSetJ = tidSets[j] + y = list(set(tidSetI).intersection(tidSetJ)) + if len(y) >= self._minimum: + classItemSets.append(itemJ) + classTidSets.append(y) + newPrefix = list(set(itemSetX)) + prefix + self._Generation(newPrefix, classItemSets, classTidSets) + self._save(prefix, list(set(itemSetX)), tidSetI) + + def _convert(self, value): + """ + to convert the type of user specified minSup value + :param value: user specified minSup value + :type value: int or float or str + :return: converted type + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self._Database) * value) + if type(value) is str: + if '.' in value: + value = float(value) + value = ((len(self._Database)) * value) + else: + value = int(value) + return value + +
+[docs] + @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.") + def startMine(self): + """ + Main function of the program + """ + self.mine()
+ + +
+[docs] + def mine(self): + """ + Main function of the program + """ + self._startTime = _ab._time.time() + if self._iFile is None: + raise Exception("Please enter the file path or file name:") + if self._k is None: + raise Exception("Please enter the Minimum Support") + self._creatingItemSets() + self._k = self._convert(self._k) + plist = self._frequentOneItem() + for i in range(len(plist)): + itemI = plist[i] + tidSetI = self._tidList[itemI] + itemSetX = [itemI] + itemSets = [] + tidSets = [] + for j in range(i + 1, len(plist)): + itemJ = plist[j] + tidSetJ = self._tidList[itemJ] + y1 = list(set(tidSetI).intersection(tidSetJ)) + if len(y1) >= self._minimum: + itemSets.append(itemJ) + tidSets.append(y1) + self._Generation(itemSetX, itemSets, tidSets) + print(" TopK frequent patterns were successfully generated using FAE algorithm.") + self._endTime = _ab._time.time() + self._memoryUSS = float() + self._memoryRSS = float() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss
+ + +
+[docs] + def getMemoryUSS(self): + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self): + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + + :rtype: float + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self): + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self): + """ + Storing final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + + :rtype: pd.DataFrame + """ + + dataFrame = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a.replace('\t', ' '), b]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support']) + return dataFrame
+ + +
+[docs] + def save(self, outFile): + """ + Complete set of frequent patterns will be loaded in to an output file + + :param outFile: name of the output file + + :type outFile: file + """ + self._oFile = outFile + writer = open(self._oFile, 'w+') + for x, y in self._finalPatterns.items(): + patternsAndSupport = x.strip() + ":" + str(y) + writer.write("%s \n" % patternsAndSupport)
+ + +
+[docs] + def getPatterns(self): + """ + Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs] + def printTOPK(self): + """ + This function is used to print the results + """ + print("Top K Frequent Patterns:", len(self.getPatterns())) + print("Total Memory in USS:", self.getMemoryUSS()) + print("Total Memory in RSS", self.getMemoryRSS()) + print("Total ExecutionTime in seconds:", self.getRuntime())
+
+ + + +if __name__ == "__main__": + _ap = str() + if len(_ab._sys.argv) == 4 or len(_ab._sys.argv) == 5: + if len(_ab._sys.argv) == 5: + _ap = FAE(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4]) + if len(_ab._sys.argv) == 4: + _ap = FAE(_ab._sys.argv[1], _ab._sys.argv[3]) + _ap.mine() + print("Top K Frequent Patterns:", len(_ap.getPatterns())) + _ap.save(_ab._sys.argv[2]) + print("Total Memory in USS:", _ap.getMemoryUSS()) + print("Total Memory in RSS", _ap.getMemoryRSS()) + print("Total ExecutionTime in seconds:", _ap.getRuntime()) + else: + print("Error! The number of input parameters does not match the total number of parameters provided") + + +
+
\ No newline at end of file
diff --git a/sphinx/_build/html/_modules/PAMI/fuzzyCorrelatedPattern/basic/FCPGrowth.html b/sphinx/_build/html/_modules/PAMI/fuzzyCorrelatedPattern/basic/FCPGrowth.html
new file mode 100644
index 000000000..814c805c8
--- /dev/null
+++ b/sphinx/_build/html/_modules/PAMI/fuzzyCorrelatedPattern/basic/FCPGrowth.html
@@ -0,0 +1,915 @@
+PAMI.fuzzyCorrelatedPattern.basic.FCPGrowth — PAMI 2024.04.23 documentation

Source code for PAMI.fuzzyCorrelatedPattern.basic.FCPGrowth

+#  FCPGrowth is the algorithm to discover Correlated Fuzzy-frequent patterns in a transactional database.
+#  It is based on traditional fuzzy frequent pattern mining.
+#
+# **Importing this algorithm into a python program**
+# -------------------------------------------------------
+#
+#             from PAMI.fuzzyCorrelatedPattern.basic import FCPGrowth as alg
+#
+#             obj = alg.FCPGrowth("input.txt",2,0.4)
+#
+#             obj.mine()
+#
+#             correlatedFuzzyFrequentPatterns = obj.getPatterns()
+#
+#             print("Total number of Correlated Fuzzy Frequent Patterns:", len(correlatedFuzzyFrequentPatterns))
+#
+#             obj.save("output")
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+
+
+from PAMI.fuzzyCorrelatedPattern.basic import abstract as _ab
+from typing import List, Dict, Tuple, Set, Union, Any, Generator
+from deprecated import deprecated
+
+
+class _FFList:
+    """
+    A class representing the fuzzy list of an item
+
+    :Attributes:
+
+        item: int
+            the item name
+        sumIUtil: float
+            the sum of utilities of a fuzzy item in database
+        sumRUtil: float
+            the sum of resting values of a fuzzy item in database
+        elements: list
+            a list of elements containing the tid, utility, and resting value of the item in each transaction
+
+    :Methods:
+
+        addElement(element)
+            Method to add an element to this fuzzy list and update the sums at the same time.
+
+    """
+
+    def __init__(self, itemName: str, region: str) -> None:
+        self.item = itemName
+        self.region = region
+        self.sumIUtil = 0.0
+        self.sumRUtil = 0.0
+        self.elements = []
+
+    def addElement(self, element: 'Element') -> None:
+        """
+        A method that adds a new element to the FFList
+
+        :param element: an element to be added to the FFList
+        :type element: Element
+        """
+
+        self.sumIUtil += element.IUtils
+        self.sumRUtil += element.RUtils
+        self.elements.append(element)
+
+
+
+[docs] +class Element: + """ + A class representing an element of a fuzzy list + + :Attributes: + + tid : int + keeps track of the transaction id + IUtils: float + the utility of a fuzzy item in the transaction + RUtils : float + the neighbourhood resting value of a fuzzy item in the transaction + """ + + def __init__(self, tid: int, IUtil: float, RUtil: float) -> None: + self.tid = tid + self.IUtils = IUtil + self.RUtils = RUtil
+ + + +class _Regions: + """ + A class to calculate the region values + + :Attributes: + + low : int + low region value + middle: int + middle region value + high : int + high region values + """ + + def __init__(self, item: str, quantity: int, regionsNumber: int, mapOfRegions: Dict) -> None: + self.low = 0 + self.middle = 0 + self.high = 0 + if regionsNumber == 3: + if 0 < quantity <= 1: + self.low = 1 + self.high = 0 + self.middle = 0 + t1 = (item, 'L') + if t1 not in mapOfRegions.keys(): + mapOfRegions[t1] = 1 + else: + temp = mapOfRegions[t1] + mapOfRegions[t1] = temp + 1 + elif 1 <= quantity < 6: + self.low = float((-0.2 * quantity) + 1.2) + self.middle = float((0.2 * quantity) - 0.2) + self.high = 0 + t1 = (item, 'L') + if t1 not in mapOfRegions.keys(): + mapOfRegions[t1] = 1 + else: + temp = mapOfRegions[t1] + mapOfRegions[t1] = temp + 1 + t1 = (item, 'M') + if t1 not in mapOfRegions.keys(): + mapOfRegions[t1] = 1 + else: + temp = mapOfRegions[t1] + mapOfRegions[t1] = temp + 1 + elif 6 <= quantity <= 11: + self.low = 0 + self.middle = float((-0.2 * quantity) + 2.2) + self.high = float((0.2 * quantity) - 1.2) + t1 = (item, 'M') + if t1 not in mapOfRegions.keys(): + mapOfRegions[t1] = 1 + else: + temp = mapOfRegions[t1] + mapOfRegions[t1] = temp + 1 + t1 = (item, 'H') + if t1 not in mapOfRegions.keys(): + mapOfRegions[t1] = 1 + else: + temp = mapOfRegions[t1] + mapOfRegions[t1] = temp + 1 + + else: + self.low = 0 + self.middle = 0 + self.high = 1 + t1 = (item, 'H') + if t1 not in mapOfRegions.keys(): + mapOfRegions[t1] = 1 + else: + temp = mapOfRegions[t1] + mapOfRegions[t1] = temp + 1  # fixed: previously rebound mapOfRegions instead of updating the dict entry + + +class _Pair: + + """ + A class to store an item and its quantity together + """ + + def __init__(self) -> None: + """ + A class to store an item and its quantity together + """ + self.item = 0 + self.quantity = 0 + self.region = 'N' + +
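+# The _Regions class above realises, for regionsNumber == 3, a triangular
+# fuzzy partition of the quantity axis. A minimal sketch of the same
+# membership functions written as plain functions (illustrative only,
+# mirroring the branch conditions of _Regions.__init__, and assuming
+# quantity > 0 as in the fuzzy databases used here):
+#
+#     def low(q):    return 1.0 if 0 < q <= 1 else (-0.2 * q + 1.2) if q < 6 else 0.0
+#     def middle(q): return 0.0 if q <= 1 else (0.2 * q - 0.2) if q < 6 else (-0.2 * q + 2.2) if q <= 11 else 0.0
+#     def high(q):   return 0.0 if q < 6 else (0.2 * q - 1.2) if q <= 11 else 1.0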
+[docs] +class FCPGrowth(_ab._corelatedFuzzyFrequentPatterns): + """ + :Description: FCPGrowth is the algorithm to discover Correlated Fuzzy-frequent patterns in a transactional database. + it is based on traditional fuzzy frequent pattern mining. + + :Reference: Lin, N.P., & Chueh, H. (2007). Fuzzy correlation rules mining. + https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.416.6053&rep=rep1&type=pdf + + :param iFile: str : + Name of the Input file to mine complete set of frequent patterns + :param oFile: str : + Name of the output file to store complete set of frequent patterns + :param minSup: int or float or str : + The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup is expressed in count. Otherwise, it will be treated as float. + :param maxPer: float : + The user can specify maxPer in count or proportion of database size. If the program detects the data type of maxPer is integer, then it treats maxPer is expressed in count. + :param minAllConf: float : + The user can specify minAllConf values within the range (0, 1). + + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + + :Attributes: + + iFile : file + Name of the input file to mine complete set of fuzzy spatial frequent patterns + oFile : file + Name of the oFile file to store complete set of fuzzy spatial frequent patterns + minSup : int + The user given support + minAllConf: float + user Specified minAllConf( should be in range 0 and 1) + memoryRSS : float + To store the total amount of RSS memory consumed by the program + startTimeTime:float + To record the startTime time of the mining process + endTime:float + To record the completion time of the mining process + itemsCnt: int + To record the number of fuzzy spatial itemSets generated + mapItemsLowSum: map + To keep track of low region values of items + mapItemsMidSum: map + To keep track of middle region values of items + mapItemsHighSum: map + To keep track of high region values of items + mapItemSum: map + To keep track of sum of Fuzzy Values of items + mapItemRegions: map + To Keep track of fuzzy regions of item + jointCnt: int + To keep track of the number of FFI-list that was constructed + BufferSize: int + represent the size of Buffer + itemBuffer list + to keep track of items in buffer + + :Methods: + + startTimeMine() + Mining process will startTime from here + getPatterns() + Complete set of patterns will be retrieved with this function + save(oFile) + Complete set of frequent patterns will be loaded in to a output file + getPatternsAsDataFrame() + Complete set of frequent patterns will be loaded in to a dataframe + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function + getRatio(self, prefix, prefixLen, item) + Method to calculate the ration of itemSet + convert(value): + To convert the given user specified value + FSFIMining( prefix, prefixLen, fsFim, minSup) + Method generate FFI from prefix + construct(px, py) + A function to construct Fuzzy itemSet from 2 fuzzy itemSets + findElementWithTID(uList, tid) + To find element with same tid as 
given + WriteOut(prefix, prefixLen, item, sumIUtil,ratio) + To Store the patten + + **Executing the code on terminal :** + ------------------------------------------ + + .. code-block:: console + + Format: + + (.venv) $ python3 FCPGrowth.py <inputFile> <outputFile> <minSup> <minAllConf> <sep> + + Example Usage: + + (.venv) $ python3 FCPGrowth.py sampleTDB.txt output.txt 2 0.2 + + .. note:: minSup will be considered in percentage of database transactions + + + **Sample run of importing the code:** + ----------------------------------------- + .. code-block:: python + + from PAMI.fuzzyCorrelatedPattern.basic import FCPGrowth as alg + + obj = alg.FCPGrowth("input.txt",2,0.4) + + obj.mine() + + correlatedFuzzyFrequentPatterns = obj.getPatterns() + + print("Total number of Correlated Fuzzy Frequent Patterns:", len(correlatedFuzzyFrequentPatterns)) + + obj.save("output") + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime + + print("Total ExecutionTime in seconds:", run) + + **Credits:** + ------------------- + + The complete program was written by B.Sai Chitra under the supervision of Professor Rage Uday Kiran. + + """ + _startTime = float() + _endTime = float() + _minSup = str() + _maxPer = float() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _minAllConf = 0.0 + _sep = " " + _memoryUSS = float() + _memoryRSS = float() + _Database = [] + + def __init__(self, iFile: str, minSup: int, minAllConf: float, sep: str="\t") -> None: + super().__init__(iFile, minSup, minAllConf, sep) + self._temp = {} + self._mapItemRegionSum = {} + self._itemsCnt = 0 + self._mapItemsLowSum = {} + self._mapItemsMidSum = {} + self._mapItemsHighSum = {} + self._mapItemSum = {} + self._mapItemRegions = {} + self._joinsCnt = 0 + self._BufferSize = 200 + self._itemSetBuffer = [] + self._finalPatterns = {} + self._dbLen = 0 + self._transactions = [] + self._fuzzyValues = [] + + def _compareItems(self, o1: _FFList, o2: _FFList) -> int: + """ + A Function that sort all FFI-list in ascending order of Support + + :param o1: First FFI-list + + :type o1: _FFList + + :param o2: Second FFI-list + + :type o1: _FFList + + :return: Comparision Value + + :rtype: int + """ + compare = self._mapItemSum[o1.item] - self._mapItemSum[o2.item] + if compare == 0: + # return int(o1.item) - int(o2.item) + return 1 + else: + return compare + + def _findElementWithTID(self, uList: _FFList, tid: int) -> Element: + """ + To find element with same tid as given + + :param uList: fuzzyList + :type uList: FFI-List + :param tid: transaction id + :type tid: int + :return: element eith tid as given + :rtype: element if exists or None + """ + List = uList.elements + first = 0 + last = len(List) - 1 + while first <= last: + mid = (first + last) >> 1 + if List[mid].tid < tid: + first = mid + 1 + elif List[mid].tid > tid: + last = mid - 1 + else: + return List[mid] + return None + + def _convert(self, value: Union[int, float, str]) -> float: + """ + To convert the given user specified value + + :param value: user specified value + + :return: converted value + + :rtype: float + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self._transactions) * value) + if type(value) is str: + if '.' 
in value: + value = float(value) + value = (len(self._transactions) * value) + else: + value = int(value) + return value + + def _creatingItemSets(self) -> None: + """ + Storing the complete transactions of the database/input file in a database variable + + :return: None + """ + self._transactions, self._fuzzyValues = [], [] + if isinstance(self._iFile, _ab._pd.DataFrame): + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'Transactions' in i: + self._transactions = self._iFile['Transactions'].tolist() + if 'fuzzyValues' in i: + self._fuzzyValues = self._iFile['Utilities'].tolist() + # print(self.Database) + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line = line.decode("utf-8") + line = line.split("\n")[0] + parts = line.split(":") + parts[0] = parts[0].strip() + parts[1] = parts[1].strip() + items = parts[0].split(self._sep) + quantities = parts[1].split(self._sep) + self._transactions.append([x for x in items]) + self._fuzzyValues.append([x for x in quantities]) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line = line.split("\n")[0] + parts = line.split(":") + items = parts[0].split() + quantities = parts[1].split() + self._transactions.append([x for x in items]) + self._fuzzyValues.append([x for x in quantities]) + except IOError: + print("File Not Found") + quit() + +
+[docs] + @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.") + def startMine(self) -> None: + """ + Frequent pattern mining process will start from here + """ + self.mine()
+ + + +
+[docs] + def mine(self) -> None: + """ + Frequent pattern mining process will startTime from here + """ + self._startTime = _ab._time.time() + self._creatingItemSets() + for tr in range(len(self._transactions)): + items = self._transactions[tr] + quantities = self._fuzzyValues[tr] + for i in range(0, len(items)): + item = items[i] + regions = _Regions(item, float(quantities[i]), 3, self._mapItemRegionSum) + if item in self._mapItemsLowSum.keys(): + low = self._mapItemsLowSum[item] + low += regions.low + self._mapItemsLowSum[item] = low + else: + self._mapItemsLowSum[item] = regions.low + if item in self._mapItemsMidSum.keys(): + mid = self._mapItemsMidSum[item] + mid += regions.middle + self._mapItemsMidSum[item] = mid + else: + self._mapItemsMidSum[item] = regions.middle + if item in self._mapItemsHighSum.keys(): + high = self._mapItemsHighSum[item] + high += regions.high + self._mapItemsHighSum[item] = high + else: + self._mapItemsHighSum[item] = regions.high + listOfFFIList = [] + mapItemsToFFLIST = {} + self._minSup = self._convert(self._minSup) + #minSup = self._minSup + self._minAllConf = float(self._minAllConf) + for item1 in self._mapItemsLowSum.keys(): + item = item1 + region = 'N' + low = self._mapItemsLowSum[item] + mid = self._mapItemsMidSum[item] + high = self._mapItemsHighSum[item] + if low >= mid and low >= high: + self._mapItemSum[item] = low + self._mapItemRegions[item] = "L" + region = 'L' + elif mid >= low and mid >= high: + self._mapItemSum[item] = mid + self._mapItemRegions[item] = "M" + region = 'M' + elif high >= low and high >= mid: + self._mapItemRegions[item] = "H" + region = 'H' + self._mapItemSum[item] = high + if self._mapItemSum[item] >= self._minSup: + fuList = _FFList(item, region) + mapItemsToFFLIST[item] = fuList + listOfFFIList.append(fuList) + listOfFFIList.sort(key=_ab._functools.cmp_to_key(self._compareItems)) + tid = 0 + for tr in range(len(self._transactions)): + items = self._transactions[tr] + quantities = self._fuzzyValues[tr] + revisedTransaction = [] + for i in range(0, len(items)): + pair = _Pair() + pair.item = items[i] + regions = _Regions(pair.item, float(quantities[i]), 3, self._temp) + item = pair.item + if self._mapItemSum[item] >= self._minSup: + if self._mapItemRegions[pair.item] == "L": + pair.quantity = regions.low + pair.region = 'L' + elif self._mapItemRegions[pair.item] == "M": + pair.region = 'M' + pair.quantity = regions.middle + elif self._mapItemRegions[pair.item] == "H": + pair.quantity = regions.high + pair.region = 'H' + if pair.quantity > 0: + revisedTransaction.append(pair) + revisedTransaction.sort(key=_ab._functools.cmp_to_key(self._compareItems)) + for i in range(len(revisedTransaction) - 1, -1, -1): + pair = revisedTransaction[i] + remainUtil = 0 + for j in range(len(revisedTransaction) - 1, i - 1, -1): + remainUtil += revisedTransaction[j].quantity + if pair.quantity > remainUtil: + remainingUtility = pair.quantity + else: + remainingUtility = remainUtil + if mapItemsToFFLIST.get(pair.item) is not None: + FFListOfItem = mapItemsToFFLIST[pair.item] + element = Element(tid, pair.quantity, remainingUtility) + FFListOfItem.addElement(element) + tid += 1 + self._FSFIMining(self._itemSetBuffer, 0, listOfFFIList, self._minSup) + self._endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = float() + self._memoryRSS = float() + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss + print("Fuzzy Correlated Patterns Successfully generated using 
FCPGrowth algorithm")
+ + + def _FSFIMining(self, prefix: List[_FFList], prefixLen: int, FSFIM: List[_FFList], minSup: float) -> None: + """ + Generates FFSI from prefix + + :param prefix: the prefix patterns of FFSI + :type prefix: len + :param prefixLen: the length of prefix + :type prefixLen: int + :param FSFIM: the Fuzzy list of prefix itemSets + :type FSFIM: list + :param minSup: the minimum support of + :type minSup:int + """ + for i in range(0, len(FSFIM)): + X = FSFIM[i] + if X.sumIUtil >= minSup: + ratio = self._getRatio(prefix, prefixLen, X) + if ratio >= self._minAllConf: + self._WriteOut(prefix, prefixLen, X, ratio) + if X.sumRUtil >= minSup: + exULs = [] + for j in range(i + 1, len(FSFIM)): + Y = FSFIM[j] + exULs.append(self._construct(X, Y)) + self._joinsCnt += 1 + self._itemSetBuffer.insert(prefixLen, X) + self._FSFIMining(self._itemSetBuffer, prefixLen + 1, exULs, minSup) + + def _construct(self, px: _FFList, py: _FFList) -> _FFList: + """ + A function to construct a new Fuzzy itemSet from 2 fuzzy itemSets + + :param px:the itemSet px + :type px:FFI-List + :param py:itemSet py + :type py:FFI-List + :return :the itemSet of pxy(px and py) + :rtype :FFI-List + """ + pxyUL = _FFList(py.item, py.region) + for ex in px.elements: + ey = self._findElementWithTID(py, ex.tid) + if ey is None: + continue + eXY = Element(ex.tid, min([ex.IUtils, ey.IUtils], key=lambda x: float(x)), ey.RUtils) + pxyUL.addElement(eXY) + return pxyUL + +
+[docs] + def getMemoryUSS(self) -> float: + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self) -> float: + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self) -> float: + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + return self._endTime - self._startTime
+ + + def _getRatio(self, prefix: List[_FFList], prefixLen: int, item: _FFList) -> float: + """ + Method to calculate the ration of itemSet + + :param prefix: prefix of itemSet + :type prefix: list + :param prefixLen: length of prefix + :type prefixLen: int + :param item: the last item + :type item: FFList + :return : correlated ratio + :rtype: float + """ + res = 1.0 + n = prefixLen + for i in prefix: + if self._mapItemRegionSum.get((i.item, i.region)) is not None and res < self._mapItemRegionSum[(i.item, i.region)]: + res = self._mapItemRegionSum[(i.item, i.region)] + if self._mapItemRegionSum.get((item.item, item.region)) is not None and res < self._mapItemRegionSum[ + (item.item, item.region)]: + res = self._mapItemRegionSum[(item.item, item.region)] + return item.sumIUtil / res + + def _WriteOut(self, prefix: List[_FFList], prefixLen: int, item: _FFList, ratio: float) -> None: + """ + To Store the patten + + :param prefix: prefix of itemSet + :type prefix: list + :param prefixLen: length of prefix + :type prefixLen: int + :param item: the last item + :type item: FFList + :param ratio: the ratio of itemSet + :type ratio: float + :return: None + """ + self._itemsCnt += 1 + res = "" + for i in range(0, prefixLen): + res += str(prefix[i].item) + "." + str(prefix[i].region) + '\t' + res += str(item.item) + "." + str(item.region) + #res1 = str(item.sumIUtil) + " : " + str(ratio) + "\n" + self._finalPatterns[res] = [item.sumIUtil, ratio] + +
+[docs] + def getPatterns(self) -> Dict[str, List[float]]: + """ + Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs] + def getPatternsAsDataFrame(self) -> _ab._pd.DataFrame: + """ + Storing final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + dataframe = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a.replace('\t', ' '), b[0], b[1]]) + dataframe = _ab._pd.DataFrame(data, columns=['Patterns', 'Support', 'Confidence']) + return dataframe
+ + +
+[docs] + def save(self, outFile: str) -> None: + """ + Complete set of frequent patterns will be written into an output file + + :param outFile: name of the output file + :type outFile: csv file + """ + self._oFile = outFile + writer = open(self._oFile, 'w+') + for x, y in self._finalPatterns.items(): + patternsAndSupport = x.strip() + ":" + str(y[0]) + ":" + str(y[1]) + writer.write("%s \n" % patternsAndSupport)
+ + +
+[docs] + def printResults(self) -> None: + """ + This function is used to print the results + """ + print("Total number of Fuzzy Correlated Patterns:", len(self.getPatterns())) + print("Total Memory in USS:", self.getMemoryUSS()) + print("Total Memory in RSS", self.getMemoryRSS()) + print("Total ExecutionTime in seconds:", self.getRuntime())
+
+ + +
+[docs] +def main(): + inputFile = 'https://u-aizu.ac.jp/~udayrage/datasets/fuzzyDatabases/Fuzzy_T10I4D100K.csv' + + minimumSupportCount = 1200  # Users can also specify this constraint between 0 and 1. + ratioExample = 0.8 + separator = '\t' + + obj = FCPGrowth(inputFile, minimumSupportCount, ratioExample, separator)  # initialize + obj.mine()
+ + + +if __name__ == "__main__": + main() + _ap = str() + if len(_ab._sys.argv) == 5 or len(_ab._sys.argv) == 6: + if len(_ab._sys.argv) == 6: + _ap = FCPGrowth(_ab._sys.argv[1], _ab._sys.argv[3], float(_ab._sys.argv[4]), _ab._sys.argv[5]) + if len(_ab._sys.argv) == 5: + _ap = FCPGrowth(_ab._sys.argv[1], _ab._sys.argv[3], float(_ab._sys.argv[4])) + _ap.mine() + print("Total number of Fuzzy Correlated Patterns:", len(_ap.getPatterns())) + _ap.save(_ab._sys.argv[2]) + print("Total Memory in USS:", _ap.getMemoryUSS()) + print("Total Memory in RSS", _ap.getMemoryRSS()) + print("Total ExecutionTime in seconds:", _ap.getRuntime()) + else: + print("Error! The number of input parameters does not match the total number of parameters provided") +
+
\ No newline at end of file
diff --git a/sphinx/_build/html/_modules/PAMI/fuzzyFrequentPattern/basic/FFIMiner.html b/sphinx/_build/html/_modules/PAMI/fuzzyFrequentPattern/basic/FFIMiner.html
new file mode 100644
index 000000000..05b420aa3
--- /dev/null
+++ b/sphinx/_build/html/_modules/PAMI/fuzzyFrequentPattern/basic/FFIMiner.html
@@ -0,0 +1,791 @@
+PAMI.fuzzyFrequentPattern.basic.FFIMiner — PAMI 2024.04.23 documentation

Source code for PAMI.fuzzyFrequentPattern.basic.FFIMiner

+# Fuzzy Frequent  Pattern-Miner is desired to find all  frequent fuzzy patterns which is on-trivial and challenging problem
+#
+# to its huge search space. We use efficient pruning techniques to reduce the search space.
+#
+# **Importing this algorithm into a python program**
+# ---------------------------------------------------------
+#
+#             from PAMI.fuzzyFrequentPattern import FFIMiner as alg
+#
+#             obj = alg.FFIMiner("input.txt", 2)
+#
+#             obj.mine()
+#
+#             fuzzyFrequentPattern = obj.getPatterns()
+#
+#             print("Total number of Fuzzy Frequent Patterns:", len(fuzzyFrequentPattern))
+#
+#             obj.save("outputFile")
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+from PAMI.fuzzyFrequentPattern.basic import abstract as _ab
+from typing import List, Dict, Tuple, Set, Union, Any, Generator
+from deprecated import deprecated
+
+
+class _FFList:
+    """
+    A class that represents the fuzzy list of an element
+
+    :Attributes:
+
+        item: int
+            the item name
+
+        sumIUtil: float
+            the sum of utilities of a fuzzy item in database
+
+        sumRUtil: float
+            the sum of resting values of a fuzzy item in database
+
+        elements: list
+            a list of elements containing the tid, utility, and resting value of the element in each transaction
+
+    :Methods:
+
+        addElement(element)
+            Method to add an element to this fuzzy list and update the sums at the same time.
+
+        printElement(e)
+            Method to print elements
+    """
+
+    def __init__(self, itemName: int) -> None:
+        self.item = itemName
+        self.sumIUtil = 0.0
+        self.sumRUtil = 0.0
+        self.elements = []
+
+    def addElement(self, element) -> None:
+        """
+        A method that adds a new element to the FFList
+
+        :param element: an element to be added to FFList
+        :type element: Element
+        :return: None
+        """
+        self.sumIUtil += element.iUtils
+        self.sumRUtil += element.rUtils
+        self.elements.append(element)
+
+    def printElement(self) -> None:
+        """
+        A method to print elements
+        """
+        for ele in self.elements:
+            print(ele.tid, ele.iUtils, ele.rUtils)
+
+
+class _Element:
+    """
+    A class represents an Element of a fuzzy list
+
+    :Attributes:
+
+        tid : int
+            keeps track of the transaction id
+
+        iUtils: float
+            the utility of a fuzzy item in the transaction
+
+        rUtils : float
+            the resting value of a fuzzy item in the transaction
+    """
+
+    def __init__(self, tid: int, iUtil: float, rUtil: float) -> None:
+        self.tid = tid
+        self.iUtils = iUtil
+        self.rUtils = rUtil
+
+
+class _Pair:
+    """
+    A class to store an item and its quantity together
+    """
+
+    def __init__(self) -> None:
+        self.item = 0
+        self.quantity = 0
+
+
+
[docs]
class FFIMiner(_ab._fuzzyFrequentPattenrs):
    """
    :Description: Fuzzy Frequent Pattern-Miner is designed to find all frequent fuzzy patterns, which is a non-trivial and
                  challenging problem due to its huge search space. We use efficient pruning techniques to reduce the search space.

    :Reference: Lin, Chun-Wei & Li, Ting & Fournier Viger, Philippe & Hong, Tzung-Pei. (2015).
                A fast Algorithm for mining fuzzy frequent itemsets. Journal of Intelligent & Fuzzy Systems. 29.
                2373-2379. 10.3233/IFS-151936.
                https://www.researchgate.net/publication/286510908_A_fast_Algorithm_for_mining_fuzzy_frequent_itemSets

    :param iFile: str :
        Name of the input file to mine the complete set of frequent patterns
    :param oFile: str :
        Name of the output file to store the complete set of frequent patterns
    :param minSup: int or float or str :
        The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup as expressed in count. Otherwise, it will be treated as float.
    :param maxPer: float :
        The user can specify maxPer in count or proportion of database size. If the program detects the data type of maxPer is integer, then it treats maxPer as expressed in count.
    :param fuzFile: str :
        The user can specify fuzFile.
    :param sep: str :
        This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator.


    :Attributes:

        iFile : string
            Name of the input file to mine the complete set of fuzzy frequent patterns

        fmFile : string
            Name of the fuzzy membership file to mine the complete set of fuzzy frequent patterns

        oFile : string
            Name of the oFile file to store the complete set of fuzzy frequent patterns

        minSup : float
            The user given minimum support

        memoryRSS : float
            To store the total amount of RSS memory consumed by the program

        startTime : float
            To record the start time of the mining process

        endTime : float
            To record the completion time of the mining process

        itemsCnt : int
            To record the number of fuzzy spatial itemSets generated

        mapItemSum : map
            To keep track of the sum of fuzzy values of items

        joinsCnt : int
            To keep track of the number of ffi-lists that were constructed

        BufferSize : int
            represents the size of the buffer

        itemSetBuffer : list
            to keep track of items in the buffer

    :Methods:

        mine()
            Mining process will start from here
        getPatterns()
            Complete set of patterns will be retrieved with this function
        save(oFile)
            Complete set of frequent patterns will be loaded in to an output file
        getPatternsAsDataFrame()
            Complete set of frequent patterns will be loaded in to a dataframe
        getMemoryUSS()
            Total amount of USS memory consumed by the mining process will be retrieved from this function
        getMemoryRSS()
            Total amount of RSS memory consumed by the mining process will be retrieved from this function
        getRuntime()
            Total amount of runtime taken by the mining process will be retrieved from this function
        convert(value)
            To convert the given user specified value
        compareItems(o1, o2)
            A function that sorts all ffi-lists in ascending order of support
        FSFIMining(prefix, prefixLen, FSFIM, minSup)
            Method to generate ffi from prefix
        construct(px, py)
            A function to construct a fuzzy itemSet from 2 fuzzy itemSets
        findElementWithTID(uList, tid)
            To find an element with the same tid as given
        WriteOut(prefix, prefixLen, item, sumIUtil)
            To store the pattern
    **Executing the code on terminal :**
    ------------------------------------------

    .. code-block:: console

      Format:

      (.venv) $ python3 FFIMiner.py <inputFile> <outputFile> <minSup> <separator>

      Example Usage:

      (.venv) $ python3 FFIMiner.py sampleTDB.txt output.txt 6

    .. note:: minSup will be considered in percentage of database transactions


    **Sample run of importing the code:**
    ------------------------------------------
    .. code-block:: python

        from PAMI.fuzzyFrequentPattern import FFIMiner as alg

        obj = alg.FFIMiner("input.txt", 2)

        obj.mine()

        fuzzyFrequentPattern = obj.getPatterns()

        print("Total number of Fuzzy Frequent Patterns:", len(fuzzyFrequentPattern))

        obj.save("outputFile")

        memUSS = obj.getMemoryUSS()

        print("Total Memory in USS:", memUSS)

        memRSS = obj.getMemoryRSS()

        print("Total Memory in RSS", memRSS)

        run = obj.getRuntime()

        print("Total ExecutionTime in seconds:", run)


    **Credits:**
    ---------------
    The complete program was written by B.Sai Chitra under the supervision of Professor Rage Uday Kiran.

    """

    _startTime = float()
    _endTime = float()
    _minSup = str()
    _maxPer = float()
    _finalPatterns = {}
    _iFile = " "
    _oFile = " "
    _fuzFile = " "
    _memoryUSS = float()
    _memoryRSS = float()
    _sep = "\t"

    def __init__(self, iFile: str, minSup: float, sep: str="\t") -> None:
        super().__init__(iFile, minSup, sep)
        self._startTime = 0
        self._endTime = 0
        self._itemsCnt = 0
        self._mapItemSum = {}
        self._joinsCnt = 0
        self._BufferSize = 200
        self._itemSetBuffer = []
        self._transactions = []
        self._fuzzyValues = []
        self._finalPatterns = {}
        self._dbLen = 0

    def _compareItems(self, o1: _FFList, o2: _FFList) -> int:
        """
        A function that sorts all ffi-lists in ascending order of support

        :param o1: First FFI-list
        :type o1: _FFList
        :param o2: Second FFI-list
        :type o2: _FFList
        :return: Comparison value
        :rtype: int
        """
        compare = self._mapItemSum[o1.item] - self._mapItemSum[o2.item]
        if compare == 0:
            if o1.item < o2.item:
                return -1
            elif o1.item > o2.item:
                return 1
            else:
                return 0
        else:
            return compare

    def _convert(self, value) -> Union[int, float]:
        """
        To convert the given user specified value

        :param value: user specified value
        :type value: int or float or str
        :return: converted value
        :rtype: int or float
        """
        if type(value) is int:
            value = int(value)
        if type(value) is float:
            value = (self._dbLen * value)
        if type(value) is str:
            if '.' in value:
                value = float(value)
                value = (self._dbLen * value)
            else:
                value = int(value)
        return value

    def _creatingItemsets(self) -> None:
        """
        Storing the complete transactions of the database/input file in a database variable
        """
        self._transactions, self._fuzzyValues, self._Database = [], [], []
        if isinstance(self._iFile, _ab._pd.DataFrame):
            if self._iFile.empty:
                print("its empty..")
            i = self._iFile.columns.values.tolist()
            if 'Transactions' in i:
                self._transactions = self._iFile['Transactions'].tolist()
            if 'fuzzyValues' in i:
                self._fuzzyValues = self._iFile['fuzzyValues'].tolist()
        if isinstance(self._iFile, str):
            if _ab._validators.url(self._iFile):
                data = _ab._urlopen(self._iFile)
                for line in data:
                    line = line.decode("utf-8")
                    line = line.split("\n")[0]
                    parts = line.split(":")
                    parts[0] = parts[0].strip()
                    parts[1] = parts[1].strip()
                    items = parts[0].split(self._sep)
                    quantities = parts[1].split(self._sep)
                    self._transactions.append([x for x in items])
                    self._fuzzyValues.append([float(x) for x in quantities])
            else:
                try:
                    with open(self._iFile, 'r', encoding='utf-8') as f:
                        for line in f:
                            line = line.split("\n")[0]
                            parts = line.split(":")
                            parts[0] = parts[0].strip()
                            parts[1] = parts[1].strip()
                            items = parts[0].split(self._sep)
                            quantities = parts[1].split(self._sep)
                            self._transactions.append([x for x in items])
                            self._fuzzyValues.append([float(x) for x in quantities])
                except IOError:
                    print("File Not Found")
                    quit()
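    # Illustrative input line accepted by _creatingItemsets, where <TAB> is the
    # default separator and the item names and values are hypothetical:
    #
    #     a<TAB>b<TAB>c:0.2<TAB>0.5<TAB>1.0
    #
    # parses into _transactions = [['a', 'b', 'c']] and
    # _fuzzyValues = [[0.2, 0.5, 1.0]].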
+[docs] + @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.") + def startMine(self) -> None: + """ + fuzzy-Frequent pattern mining process will start from here + """ + self.mine()
+ + +
[docs]
    def mine(self) -> None:
        """
        fuzzy-Frequent pattern mining process will start from here
        """
        self._startTime = _ab._time.time()
        self._creatingItemsets()
        for line in range(len(self._transactions)):
            items = self._transactions[line]
            quantities = self._fuzzyValues[line]
            self._dbLen += 1
            for i in range(0, len(items)):
                item = items[i]
                if item in self._mapItemSum:
                    self._mapItemSum[item] += quantities[i]
                else:
                    self._mapItemSum[item] = quantities[i]
        listOfffilist = []
        mapItemsToFFLIST = {}
        self._minSup = self._convert(self._minSup)
        for item1 in self._mapItemSum.keys():
            item = item1
            if self._mapItemSum[item] >= self._minSup:
                fuList = _FFList(item)
                mapItemsToFFLIST[item] = fuList
                listOfffilist.append(fuList)
        listOfffilist.sort(key=_ab._functools.cmp_to_key(self._compareItems))
        tid = 0
        for line in range(len(self._transactions)):
            items = self._transactions[line]
            quantities = self._fuzzyValues[line]
            revisedTransaction = []
            for i in range(0, len(items)):
                pair = _Pair()
                pair.item = items[i]
                pair.quantity = quantities[i]
                item = pair.item
                if self._mapItemSum[item] >= self._minSup:
                    if pair.quantity > 0:
                        revisedTransaction.append(pair)
            revisedTransaction.sort(key=_ab._functools.cmp_to_key(self._compareItems))
            for i in range(len(revisedTransaction) - 1, -1, -1):
                pair = revisedTransaction[i]
                remainUtil = 0
                for j in range(len(revisedTransaction) - 1, i, -1):
                    remainUtil += revisedTransaction[j].quantity
                remainingUtility = remainUtil
                if mapItemsToFFLIST.get(pair.item) is not None:
                    FFListOfItem = mapItemsToFFLIST[pair.item]
                    element = _Element(tid, pair.quantity, remainingUtility)
                    FFListOfItem.addElement(element)
            tid += 1
        self._FFIMining(self._itemSetBuffer, 0, listOfffilist, self._minSup)
        self._endTime = _ab._time.time()
        process = _ab._psutil.Process(_ab._os.getpid())
        self._memoryUSS = process.memory_full_info().uss
        self._memoryRSS = process.memory_info().rss
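    # Sketch of the remaining-utility computation in mine() (the numbers are
    # hypothetical): for a revised transaction sorted in ascending order of
    # total fuzzy value with quantities [0.2, 0.5, 1.0], the rUtil stored for
    # each element is the sum of the quantities that follow it: 1.5 for the
    # first, 1.0 for the second, and 0 for the last. sumRUtil is what lets
    # _FFIMining prune unpromising extensions early.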
    def _FFIMining(self, prefix, prefixLen, FSFIM, minSup):
        """
        Generates ffi from prefix

        :param prefix: the prefix patterns of ffi
        :type prefix: list
        :param prefixLen: the length of prefix
        :type prefixLen: int
        :param FSFIM: the fuzzy list of prefix itemSets
        :type FSFIM: list
        :param minSup: the minimum support
        :type minSup: int or float
        """
        for i in range(0, len(FSFIM)):
            X = FSFIM[i]
            if X.sumIUtil >= minSup:
                self._WriteOut(prefix, prefixLen, X.item, X.sumIUtil)
            if X.sumRUtil >= minSup:
                exULs = []
                for j in range(i + 1, len(FSFIM)):
                    Y = FSFIM[j]
                    exULs.append(self._construct(X, Y))
                    self._joinsCnt += 1
                self._itemSetBuffer.insert(prefixLen, X.item)
                self._FFIMining(self._itemSetBuffer, prefixLen + 1, exULs, minSup)
+[docs] + def getMemoryUSS(self) -> float: + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self) -> float: + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self) -> float: + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + return self._endTime - self._startTime
    def _construct(self, px, py) -> _FFList:
        """
        A function to construct a new fuzzy itemSet from 2 fuzzy itemSets

        :param px: the itemSet px
        :type px: ffi-List
        :param py: the itemSet py
        :type py: ffi-List
        :return: the itemSet of pxy (px and py)
        :rtype: ffi-List
        """
        pxyUL = _FFList(py.item)
        for ex in px.elements:
            ey = self._findElementWithTID(py, ex.tid)
            if ey is None:
                continue
            eXY = _Element(ex.tid, min([ex.iUtils, ey.iUtils], key=lambda x: float(x)), ey.rUtils)
            pxyUL.addElement(eXY)
        return pxyUL

    def _findElementWithTID(self, uList, tid) -> _Element:
        """
        To find an element with the same tid as given, using binary search over
        the tid-ordered element list

        :param uList: fuzzyList
        :type uList: ffi-List
        :param tid: transaction id
        :type tid: int
        :return: the element with the given tid
        :rtype: Element if it exists, otherwise None
        """
        List = uList.elements
        first = 0
        last = len(List) - 1
        while first <= last:
            mid = (first + last) >> 1
            if List[mid].tid < tid:
                first = mid + 1
            elif List[mid].tid > tid:
                last = mid - 1
            else:
                return List[mid]
        return None

    def _WriteOut(self, prefix: list, prefixLen: int, item: int, sumIUtil: float) -> None:
        """
        To store the pattern

        :param prefix: prefix of itemSet
        :type prefix: list
        :param prefixLen: length of prefix
        :type prefixLen: int
        :param item: the last item
        :type item: int
        :param sumIUtil: sum of utility of itemSet
        :type sumIUtil: float
        :return: None
        """
        self._itemsCnt += 1
        res = ""
        for i in range(0, prefixLen):
            res += str(prefix[i]) + "\t"
        res += str(item)
        self._finalPatterns[res] = str(sumIUtil)
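    # Sketch of _construct on hypothetical lists: if px holds the elements
    # (tid=1, iUtil=0.6, rUtil=0.9) and (tid=3, iUtil=0.4, rUtil=0.0), and py
    # holds (tid=1, iUtil=0.8, rUtil=0.2), only tid 1 is shared, so the joined
    # list pxy contains the single element (tid=1, iUtil=min(0.6, 0.8)=0.6,
    # rUtil=0.2), i.e. the fuzzy intersection of the two itemSets.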
+[docs] + def getPatternsAsDataFrame(self) -> _ab._pd.DataFrame: + """ + Storing final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataFrame = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a.replace('\t', ' '), b]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support']) + return dataFrame
+ + +
+[docs] + def getPatterns(self) -> dict: + """ + Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
[docs]
    def save(self, outFile) -> None:
        """
        Complete set of frequent patterns will be written to an output file

        :param outFile: name of the output file
        :type outFile: csv file
        :return: None
        """
        self._oFile = outFile
        with open(self._oFile, 'w+') as writer:
            for x, y in self._finalPatterns.items():
                patternsAndSupport = x.strip() + ":" + str(y)
                writer.write("%s \n" % patternsAndSupport)
+ + +
+[docs] + def printResults(self) -> None: + """ + This function is used to print the results + """ + print("Total number of Fuzzy Frequent Patterns:", len(self.getPatterns())) + print("Total Memory in USS:", self.getMemoryUSS()) + print("Total Memory in RSS", self.getMemoryRSS()) + print("Total ExecutionTime in seconds:", self.getRuntime())
+
if __name__ == "__main__":
    _ap = str()
    if len(_ab._sys.argv) == 4 or len(_ab._sys.argv) == 5:
        if len(_ab._sys.argv) == 5:
            _ap = FFIMiner(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4])
        if len(_ab._sys.argv) == 4:
            _ap = FFIMiner(_ab._sys.argv[1], _ab._sys.argv[3])
        _ap.mine()
        print("Total number of Fuzzy-Frequent Patterns:", len(_ap.getPatterns()))
        _ap.save(_ab._sys.argv[2])
        print("Total Memory in USS:", _ap.getMemoryUSS())
        print("Total Memory in RSS:", _ap.getMemoryRSS())
        print("Total ExecutionTime in seconds:", _ap.getRuntime())
    else:
        _ap = FFIMiner('sample.txt', 1, ' ')
        _ap.mine()
        print("Total number of Fuzzy-Frequent Patterns:", len(_ap.getPatterns()))
        _ap.save('output.txt')
        print("Total Memory in USS:", _ap.getMemoryUSS())
        print("Total Memory in RSS:", _ap.getMemoryRSS())
        print("Total ExecutionTime in seconds:", _ap.getRuntime())
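# A minimal usage sketch of FFIMiner from a program (the file name
# 'fuzzyDB.txt' and the minSup value are hypothetical; the input format is
# the colon separated one shown in _creatingItemsets above):
#
#     from PAMI.fuzzyFrequentPattern import FFIMiner as alg
#
#     obj = alg.FFIMiner('fuzzyDB.txt', 2)
#     obj.mine()
#     df = obj.getPatternsAsDataFrame()   # columns: 'Patterns', 'Support'
#     obj.printResults()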
\ No newline at end of file
diff --git a/sphinx/_build/html/_modules/PAMI/fuzzyFrequentPattern/basic/FFIMiner_old.html b/sphinx/_build/html/_modules/PAMI/fuzzyFrequentPattern/basic/FFIMiner_old.html
new file mode 100644
index 000000000..f4784e216
--- /dev/null
+++ b/sphinx/_build/html/_modules/PAMI/fuzzyFrequentPattern/basic/FFIMiner_old.html
@@ -0,0 +1,908 @@
+PAMI.fuzzyFrequentPattern.basic.FFIMiner_old — PAMI 2024.04.23 documentation

Source code for PAMI.fuzzyFrequentPattern.basic.FFIMiner_old

+# Fuzzy Frequent Pattern-Miner is designed to find all frequent fuzzy patterns, which is a non-trivial and challenging problem due
+#
+# to its huge search space. We use efficient pruning techniques to reduce the search space.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#             from PAMI.fuzzyFrequentPattern import FFIMiner_old as alg
+#
+#             obj = alg.FFIMiner("input.txt", 2)
+#
+#             obj.mine()
+#
+#             fuzzyFrequentPattern = obj.getPatterns()
+#
+#             print("Total number of Fuzzy Frequent Patterns:", len(fuzzyFrequentPattern))
+#
+#             obj.save("outputFile")
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+from PAMI.fuzzyFrequentPattern.basic import abstract as _ab
+from typing import List, Dict, Tuple, Set, Union, Any, Generator
+from deprecated import deprecated
+
+
+class _FFList:
+    """
+    A class that represents the fuzzy list of an element
+
+    :Attributes:
+
+         item: int
+             the item name
+         sumIUtil: float
+             the sum of utilities of a fuzzy item in database
+         sumRUtil: float
+             the sum of resting values of a fuzzy item in database
+         elements: list
+             a list of elements containing the tid, utility, and resting value of the element in each transaction
+
+    :Methods:
+
+        addElement(element)
+            Method to add an element to this fuzzy list and update the sums at the same time.
+        printElement(e)
+            Method to print elements
+    """
+
+    def __init__(self, itemName: int) -> None:
+        self.item = itemName
+        self.sumIUtil = 0.0
+        self.sumRUtil = 0.0
+        self.elements = []
+
+    def addElement(self, element) -> None:
+        """
+        A method that adds a new element to the FFList
+
+        :param element: an element to be added to FFList
+        :type element: Element
+        :return: None
+        """
+        self.sumIUtil += element.iUtils
+        self.sumRUtil += element.rUtils
+        self.elements.append(element)
+
+    def printElement(self) -> None:
+        """
+        A method to print elements
+        :return: None
+        """
+        for ele in self.elements:
+            print(ele.tid, ele.iUtils, ele.rUtils)
+
+
+class _Element:
+    """
+    A class represents an Element of a fuzzy list
+
+    :Attributes:
+
+        tid : int
+            keeps track of the transaction id
+        iUtils: float
+            the utility of a fuzzy item in the transaction
+        rUtils : float
+            the  resting value of a fuzzy item in the transaction
+    """
+
+    def __init__(self, tid: int, iUtil: float, rUtil: float) -> None:
+        self.tid = tid
+        self.iUtils = iUtil
+        self.rUtils = rUtil
+
+
+class _Regions:
+    """
+    A class to calculate the regions
+
+    :Attributes:
+
+        low : int
+            low region value
+        middle: int
+            middle region value
+        high : int
+            high region values
+    """
+
+    def __init__(self, quantity: int, regionsNumber: int) -> None:
+        self.low = 0
+        self.middle = 0
+        self.high = 0
+        if regionsNumber == 3:  # if we have 3 regions
+            if 0 < quantity <= 1:
+                self.low = 1
+                self.high = 0
+                self.middle = 0
+            elif 1 < quantity <= 6:
+                self.low = float((6 - quantity) / 5)
+                self.middle = float((quantity - 1) / 5)
+                self.high = 0
+            elif 6 < quantity <= 11:
+                self.low = 0
+                self.middle = float((11 - quantity) / 5)
+                self.high = float((quantity - 6) / 5)
+            else:
+                self.low = 0
+                self.middle = 0
+                self.high = 1
+
+
+class _Pair:
+    """
+    A class to store an item and its quantity together
+    """
+
+    def __init__(self) -> None:
+        self.item = 0
+        self.quantity = 0
+
+
+
+[docs] +class FFIMiner(_ab._fuzzyFrequentPattenrs): + """ + :Description: Fuzzy Frequent Pattern-Miner is desired to find all frequent fuzzy patterns which is on-trivial and challenging problem + to its huge search space.we are using efficient pruning techniques to reduce the search space. + + :Reference: Lin, Chun-Wei & Li, Ting & Fournier Viger, Philippe & Hong, Tzung-Pei. (2015). + A fast Algorithm for mining fuzzy frequent itemsets. Journal of Intelligent & Fuzzy Systems. 29. + 2373-2379. 10.3233/IFS-151936. + https://www.researchgate.net/publication/286510908_A_fast_Algorithm_for_mining_fuzzy_frequent_itemSets + + :param iFile: str : + Name of the Input file to mine complete set of frequent patterns + :param oFile: str : + Name of the output file to store complete set of frequent patterns + :param minSup: int or float or str : + The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup is expressed in count. Otherwise, it will be treated as float. + :param maxPer: float : + The user can specify maxPer in count or proportion of database size. If the program detects the data type of maxPer is integer, then it treats maxPer is expressed in count. + :param fuzFile: str : + The user can specify fuzFile. + + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + + :Attributes: + + iFile : string + Name of the input file to mine complete set of fuzzy frequent patterns + fmFile : string + Name of the fuzzy membership file to mine complete set of fuzzy frequent patterns + oFile : string + Name of the oFile file to store complete set of fuzzy frequent patterns + minSup : float + The user given minimum support + memoryRSS : float + To store the total amount of RSS memory consumed by the program + startTime:float + To record the start time of the mining process + endTime:float + To record the completion time of the mining process + itemsCnt: int + To record the number of fuzzy spatial itemSets generated + mapItemsLowSum: map + To keep track of low region values of items + mapItemsMidSum: map + To keep track of middle region values of items + mapItemsHighSum: map + To keep track of high region values of items + mapItemSum: map + To keep track of sum of Fuzzy Values of items + mapItemRegions: map + To Keep track of fuzzy regions of item + jointCnt: int + To keep track of the number of ffi-list that was constructed + BufferSize: int + represent the size of Buffer + itemBuffer list + to keep track of items in buffer + + :Methods: + + mine() + Mining process will start from here + getPatterns() + Complete set of patterns will be retrieved with this function + save(oFile) + Complete set of frequent patterns will be loaded in to a output file + getPatternsAsDataFrame() + Complete set of frequent patterns will be loaded in to a dataframe + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function + convert(value) + To convert the given user specified value + compareItems(o1, o2) + A Function that sort all ffi-list in ascending order of Support + FSFIMining(prefix, prefixLen, FSFIM, minSup) + Method 
generate ffi from prefix + construct(px, py) + A function to construct Fuzzy itemSet from 2 fuzzy itemSets + findElementWithTID(uList, tid) + To find element with same tid as given + WriteOut(prefix, prefixLen, item, sumIUtil) + To Store the patten + + **Executing the code on terminal :** + ----------------------------------------- + + .. code-block:: console + + Format: + + (.venv) $ python3 FFIMinerMiner.py <inputFile> <outputFile> <minSup> <separator> + + Example Usage: + + (.venv) $ python3 FFIMinerMiner.py sampleTDB.txt output.txt 6 + + (.venv) $ python3 FFIMinerMiner.py sampleTDB.txt output.txt 0.3 + + .. note:: minSup will be considered in percentage of database transactions + + + **Sample run of importing the code:** + ---------------------------------------- + + from PAMI.fuzzyFrequentPattern import FFIMiner as alg + + obj = alg.FFIMiner("input.txt", "fuzzyMembership.txt" 2) + + obj.mine() + + fuzzyFrequentPattern = obj.getPatterns() + + print("Total number of Fuzzy Frequent Patterns:", len(fuzzyFrequentPattern)) + + obj.save("outputFile") + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + + **Credits:** + ------------- + The complete program was written by B.Sai Chitra under the supervision of Professor Rage Uday Kiran. + + """ + _startTime = float() + _endTime = float() + _minSup = str() + _maxPer = float() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _fuzFile = " " + _memoryUSS = float() + _memoryRSS = float() + _sep = "\t" + + def __init__(self, iFile: str, fuzFile: str, minSup: float, sep: str="\t") -> None: + super().__init__(iFile, fuzFile, minSup, sep) + self._startTime = 0 + self._endTime = 0 + self._itemsCnt = 0 + self._mapItemsLowSum = {} + self._mapItemsMidSum = {} + self._mapItemsHighSum = {} + self._mapItemSum = {} + self._mapItemRegions = {} + self._joinsCnt = 0 + self._BufferSize = 200 + self._itemSetBuffer = [] + self._transactions = [] + self._fuzzyValues = [] + self._finalPatterns = {} + self._RegionsCal = [] + self._LabelKeyOne = {} + self._LabelKey = {} + self._RegionsLabel = [] + self._dbLen = 0 + + def _compareItems(self, o1: _FFList, o2: _FFList) -> int: + """ + A Function that sort all ffi-list in ascending order of Support + + :param o1: First FFI-list + :type o1: _FFList + :param o2: Second FFI-list + :type o2: _FFList + :return: Comparison Value + :rtype: int + """ + compare = self._mapItemSum[o1.item] - self._mapItemSum[o2.item] + if compare == 0: + if o1.item < o2.item: + return -1 + elif o1.item > o2.item: + return 1 + else: + return 0 + else: + return compare + + def _convert(self, value: Union[int, float, str]) -> float: + """ + To convert the given user specified value + + :param value: user specified value + :type value: int or float or str + :return: converted value + :rtype: float + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (self._dbLen * value) + if type(value) is str: + if '.' 
in value:
                value = float(value)
                value = (self._dbLen * value)
            else:
                value = int(value)
        return value

    def _fuzzyMembershipFunc(self) -> None:
        """
        Parses the user specified fuzzy membership file into region bounds and region labels
        """
        try:
            with open(self._fuzFile, 'r', encoding='utf-8') as f:
                count = 0
                for line in f:
                    line = line.split("\n")[0]
                    parts = line.split(" ")
                    lowerBound = parts[0].strip()
                    upperBound = parts[1].strip()
                    lb_Label = parts[2].strip()
                    ub_Label = parts[3].strip()
                    self._RegionsCal.append([int(lowerBound), int(upperBound)])
                    self._RegionsLabel.append([lb_Label, ub_Label])
                    if lb_Label.capitalize() not in self._LabelKey:
                        self._LabelKey[lb_Label.capitalize()] = count
                        count += 1
                    if ub_Label.capitalize() not in self._LabelKey:
                        self._LabelKey[ub_Label.capitalize()] = count
                        count += 1
                self._LabelKeyOne = {v: k for k, v in self._LabelKey.items()}
        except IOError:
            print("File Not Found")
            quit()

    def _creatingItemsets(self) -> None:
        """
        Storing the complete transactions of the database/input file in a database variable
        """
        self._transactions, self._fuzzyValues, self._Database = [], [], []
        if isinstance(self._iFile, _ab._pd.DataFrame):
            if self._iFile.empty:
                print("its empty..")
            i = self._iFile.columns.values.tolist()
            if 'Transactions' in i:
                self._transactions = self._iFile['Transactions'].tolist()
            if 'fuzzyValues' in i:
                self._fuzzyValues = self._iFile['fuzzyValues'].tolist()
        if isinstance(self._iFile, str):
            if _ab._validators.url(self._iFile):
                data = _ab._urlopen(self._iFile)
                for line in data:
                    line = line.decode("utf-8")
                    line = line.split("\n")[0]
                    parts = line.split(":")
                    parts[0] = parts[0].strip()
                    parts[2] = parts[2].strip()
                    items = parts[0].split(self._sep)
                    quantities = parts[2].split(self._sep)
                    self._transactions.append([x for x in items])
                    self._fuzzyValues.append([x for x in quantities])
            else:
                try:
                    with open(self._iFile, 'r', encoding='utf-8') as f:
                        for line in f:
                            line = line.split("\n")[0]
                            parts = line.split(":")
                            parts[0] = parts[0].strip()
                            parts[2] = parts[2].strip()
                            items = parts[0].split(self._sep)
                            quantities = parts[2].split(self._sep)
                            self._transactions.append([x for x in items])
                            self._fuzzyValues.append([x for x in quantities])
                except IOError:
                    print("File Not Found")
                    quit()

    def _Regions(self, quantity: float) -> None:
        """
        Computes the fuzzy membership value of a quantity for every user defined region

        :param quantity: quantity to calculate regions for
        :type quantity: float
        :return: None
        """
        self.list = [0] * len(self._LabelKey)
        if self._RegionsCal[0][0] < quantity <= self._RegionsCal[0][1]:
            self.list[0] = 1
            return
        elif quantity >= self._RegionsCal[-1][0]:
            self.list[-1] = 1
            return
        else:
            for i in range(1, len(self._RegionsCal) - 1):
                if self._RegionsCal[i][0] < quantity <= self._RegionsCal[i][1]:
                    base = self._RegionsCal[i][1] - self._RegionsCal[i][0]
                    for pos in range(0, 2):
                        if self._RegionsLabel[i][pos].islower():
                            self.list[self._LabelKey[self._RegionsLabel[i][pos].capitalize()]] = float(
                                (self._RegionsCal[i][1] - quantity) / base)
                        else:
                            self.list[self._LabelKey[self._RegionsLabel[i][pos].capitalize()]] = float(
                                (quantity - self._RegionsCal[i][0]) / base)
                    return
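    # Worked example for the 3-region case handled by the module-level
    # _Regions class above: a quantity of 4 falls in the 1 < q <= 6 band,
    # giving low = (6 - 4) / 5 = 0.4, middle = (4 - 1) / 5 = 0.6, and
    # high = 0, so the memberships inside a band sum to 1.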
+[docs] + @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.") + def startMine(self) -> None: + """ + fuzzy-Frequent pattern mining process will start from here + """ + self.mine()
+ + + +
[docs]
    def mine(self) -> None:
        """
        fuzzy-Frequent pattern mining process will start from here
        """
        self._startTime = _ab._time.time()
        self._creatingItemsets()
        for line in range(len(self._transactions)):
            items = self._transactions[line]
            quantities = self._fuzzyValues[line]
            self._dbLen += 1
            for i in range(0, len(items)):
                regions = _Regions(float(quantities[i]), 3)
                item = items[i]
                if item in self._mapItemsLowSum.keys():
                    low = self._mapItemsLowSum[item]
                    low += regions.low
                    self._mapItemsLowSum[item] = low
                else:
                    self._mapItemsLowSum[item] = regions.low
                if item in self._mapItemsMidSum.keys():
                    mid = self._mapItemsMidSum[item]
                    mid += regions.middle
                    self._mapItemsMidSum[item] = mid
                else:
                    self._mapItemsMidSum[item] = regions.middle
                if item in self._mapItemsHighSum.keys():
                    high = self._mapItemsHighSum[item]
                    high += regions.high
                    self._mapItemsHighSum[item] = high
                else:
                    self._mapItemsHighSum[item] = regions.high
        listOfffilist = []
        mapItemsToFFLIST = {}
        self._minSup = self._convert(self._minSup)
        for item1 in self._mapItemsLowSum.keys():
            item = item1
            low = self._mapItemsLowSum[item]
            mid = self._mapItemsMidSum[item]
            high = self._mapItemsHighSum[item]
            if low >= mid and low >= high:
                self._mapItemSum[item] = low
                self._mapItemRegions[item] = "L"
            elif mid >= low and mid >= high:
                self._mapItemSum[item] = mid
                self._mapItemRegions[item] = "M"
            elif high >= low and high >= mid:
                self._mapItemRegions[item] = "H"
                self._mapItemSum[item] = high
            if self._mapItemSum[item] >= self._minSup:
                fuList = _FFList(item)
                mapItemsToFFLIST[item] = fuList
                listOfffilist.append(fuList)
        listOfffilist.sort(key=_ab._functools.cmp_to_key(self._compareItems))
        tid = 0
        for line in range(len(self._transactions)):
            items = self._transactions[line]
            quantities = self._fuzzyValues[line]
            revisedTransaction = []
            for i in range(0, len(items)):
                pair = _Pair()
                pair.item = items[i]
                regions = _Regions(float(quantities[i]), 3)
                item = pair.item
                if self._mapItemSum[item] >= self._minSup:
                    if self._mapItemRegions[pair.item] == "L":
                        pair.quantity = regions.low
                    elif self._mapItemRegions[pair.item] == "M":
                        pair.quantity = regions.middle
                    elif self._mapItemRegions[pair.item] == "H":
                        pair.quantity = regions.high
                    if pair.quantity > 0:
                        revisedTransaction.append(pair)
            revisedTransaction.sort(key=_ab._functools.cmp_to_key(self._compareItems))
            for i in range(len(revisedTransaction) - 1, -1, -1):
                pair = revisedTransaction[i]
                remainUtil = 0
                for j in range(len(revisedTransaction) - 1, i, -1):
                    remainUtil += revisedTransaction[j].quantity
                remainingUtility = remainUtil
                if mapItemsToFFLIST.get(pair.item) is not None:
                    FFListOfItem = mapItemsToFFLIST[pair.item]
                    element = _Element(tid, pair.quantity, remainingUtility)
                    FFListOfItem.addElement(element)
            tid += 1
        self._FSFIMining(self._itemSetBuffer, 0, listOfffilist, self._minSup)
        self._endTime = _ab._time.time()
        process = _ab._psutil.Process(_ab._os.getpid())
        self._memoryUSS = process.memory_full_info().uss
        self._memoryRSS = process.memory_info().rss
+ + + def _FSFIMining(self, prefix: List[int], prefixLen: int, FSFIM: List[_FFList], minSup: float) -> None: + """Generates ffi from prefix + + :param prefix: the prefix patterns of ffi + :type prefix: list + :param prefixLen: the length of prefix + :type prefixLen: int + :param FSFIM: the Fuzzy list of prefix itemSets + :type FSFIM: list + :param minSup: the minimum support of + :type minSup: float + :return: None + """ + for i in range(0, len(FSFIM)): + X = FSFIM[i] + if X.sumIUtil >= minSup: + self._WriteOut(prefix, prefixLen, X.item, X.sumIUtil) + if X.sumRUtil >= minSup: + exULs = [] + for j in range(i + 1, len(FSFIM)): + Y = FSFIM[j] + exULs.append(self._construct(X, Y)) + self._joinsCnt += 1 + self._itemSetBuffer.insert(prefixLen, X.item) + self._FSFIMining(self._itemSetBuffer, prefixLen + 1, exULs, minSup) + +
+[docs] + def getMemoryUSS(self) -> float: + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self) -> float: + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self) -> float: + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + return self._endTime - self._startTime
+ + + def _construct(self, px: _FFList, py: _FFList) -> _FFList: + """ + A function to construct a new Fuzzy itemSet from 2 fuzzy itemSets + + :param px:the itemSet px + :type px:ffi-List + :param py:itemSet py + :type py:ffi-List + :return :the itemSet of pxy(px and py) + :rtype :ffi-List + """ + pxyUL = _FFList(py.item) + for ex in px.elements: + ey = self._findElementWithTID(py, ex.tid) + if ey is None: + continue + eXY = _Element(ex.tid, min([ex.iUtils, ey.iUtils], key=lambda x: float(x)), ey.rUtils) + pxyUL.addElement(eXY) + return pxyUL + + def _findElementWithTID(self, uList: _FFList, tid: int) -> Union[_Element, None]: + """ + To find element with same tid as given + + :param uList: fuzzyList + :type uList: ffi-List + :param tid: transaction id + :type tid: int + :return: element tid as given + :rtype: element if exit or None + """ + List = uList.elements + first = 0 + last = len(List) - 1 + while first <= last: + mid = (first + last) >> 1 + if List[mid].tid < tid: + first = mid + 1 + elif List[mid].tid > tid: + last = mid - 1 + else: + return List[mid] + return None + + def _WriteOut(self, prefix: List[int], prefixLen: int, item: int, sumIUtil: float) -> None: + """ + To Store the patten + + :param prefix: prefix of itemSet + :type prefix: list + :param prefixLen: length of prefix + :type prefixLen: int + :param item: the last item + :type item: int + :param sumIUtil: sum of utility of itemSet + :type sumIUtil: float + :return: None + """ + self._itemsCnt += 1 + res = "" + for i in range(0, prefixLen): + res += str(prefix[i]) + "." + str(self._mapItemRegions[prefix[i]]) + "\t" + res += str(item) + "." + str(self._mapItemRegions.get(item)) + res1 = str(sumIUtil) + self._finalPatterns[res] = res1 + +
+[docs] + def getPatternsAsDataFrame(self) -> _ab._pd.DataFrame: + """ + Storing final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataFrame = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a.replace('\t', ' '), b]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support']) + return dataFrame
+ + +
+[docs] + def getPatterns(self) -> Dict[str, str]: + """ + Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs] + def save(self, outFile: str) -> None: + """ + Complete set of frequent patterns will be loaded in to an output file + + :param outFile: name of the output file + :type outFile: csv file + :return: None + """ + self._oFile = outFile + writer = open(self._oFile, 'w+') + for x, y in self._finalPatterns.items(): + patternsAndSupport = x.strip() + ":" + str(y) + writer.write("%s \n" % patternsAndSupport)
+ + +
+[docs] + def printResults(self) -> None: + """ + This function is used to print the results + """ + print("Total number of Fuzzy Frequent Patterns:", len(self.getPatterns())) + print("Total Memory in USS:", self.getMemoryUSS()) + print("Total Memory in RSS", self.getMemoryRSS()) + print("Total ExecutionTime in seconds:", self.getRuntime())
+
if __name__ == "__main__":
    # Expected CLI arguments: <inputFile> <outputFile> <fuzFile> <minSup> [<separator>]
    _ap = str()
    if len(_ab._sys.argv) == 5 or len(_ab._sys.argv) == 6:
        if len(_ab._sys.argv) == 6:
            _ap = FFIMiner(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5])
        if len(_ab._sys.argv) == 5:
            _ap = FFIMiner(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4])
        _ap.mine()
        print("Total number of Fuzzy-Frequent Patterns:", len(_ap.getPatterns()))
        _ap.save(_ab._sys.argv[2])
        print("Total Memory in USS:", _ap.getMemoryUSS())
        print("Total Memory in RSS:", _ap.getMemoryRSS())
        print("Total ExecutionTime in seconds:", _ap.getRuntime())
    else:
        print("Error! The number of input parameters does not match the total number of parameters provided")
\ No newline at end of file
diff --git a/sphinx/_build/html/_modules/PAMI/fuzzyGeoreferencedFrequentPattern/basic/FFSPMiner.html b/sphinx/_build/html/_modules/PAMI/fuzzyGeoreferencedFrequentPattern/basic/FFSPMiner.html
new file mode 100644
index 000000000..6f955d4ee
--- /dev/null
+++ b/sphinx/_build/html/_modules/PAMI/fuzzyGeoreferencedFrequentPattern/basic/FFSPMiner.html
@@ -0,0 +1,845 @@
+PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner — PAMI 2024.04.23 documentation

Source code for PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner

+# Fuzzy Frequent Spatial Pattern-Miner is designed to find all spatially frequent fuzzy patterns,
+# which is a non-trivial and challenging problem due to its huge search space. We use efficient pruning
+# techniques to reduce the search space.
+#
+# **Importing this algorithm into a python program**
+# ---------------------------------------------------------
+# .. code-block:: python
+#
+#             from PAMI.fuzzyGeoreferencedFrequentPattern import FFSPMiner as alg
+#
+#             obj = alg.FFSPMiner("input.txt", "neighbours.txt", 2)
+#
+#             obj.mine()
+#
+#             fuzzySpatialFrequentPatterns = obj.getPatterns()
+#
+#             print("Total number of fuzzy frequent spatial patterns:", len(fuzzySpatialFrequentPatterns))
+#
+#             obj.save("outputFile")
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+from PAMI.fuzzyGeoreferencedFrequentPattern.basic import abstract as _ab
+from typing import List, Dict, Tuple, Set, Union, Any, Generator
+from deprecated import deprecated
+
+
+class _FFList:
+    """
+    A class that represents the fuzzy list of an element
+
+    :Attributes:
+
+         item : int
+             the item name
+         sumIUtil : float
+             the sum of utilities of a fuzzy item in database
+         sumRUtil : float
+             the sum of resting values of a fuzzy item in database
+         elements : list
+             a list of elements containing the tid, utility, and resting value of the element in each transaction
+
+    :Methods:
+
+        addElement(element)
+            Method to add an element to this fuzzy list and update the sums at the same time.
+        printElement(e)
+            Method to print elements
+
+    """
+
+    def __init__(self, itemName: str) -> None:
+        self.item = itemName
+        self.sumIUtil = 0.0
+        self.sumRUtil = 0.0
+        self.elements = []
+
+    def addElement(self, element) -> None:
+        """
+        A method that adds a new element to the FFList
+
+        :param element: an element to be added to FFList
+        :type element: Element
+        :return: None
+        """
+        self.sumIUtil += element.iUtils
+        self.sumRUtil += element.rUtils
+        self.elements.append(element)
+
+    def printElement(self) -> None:
+        """
+        A Method to Print elements in the FFList
+        """
+        for ele in self.elements:
+            print(ele.tid, ele.iUtils, ele.rUtils)
+
+
+class _Element:
+    """
+    A class represents an Element of a fuzzy list
+
+    :Attributes:
+
+        tid : int
+            keeps track of the transaction id
+        iUtils : float
+            the utility of a fuzzy item in the transaction
+        rUtils : float
+            the neighbourhood resting value of a fuzzy item in the transaction
+    """
+
+    def __init__(self, tid: int, iUtil: float, rUtil: float) -> None:
+        self.tid = tid
+        self.iUtils = iUtil
+        self.rUtils = rUtil
+
+
+class _Pair:
+    """
+    A class to store an item and its quantity together
+    """
+
+    def __init__(self) -> None:
+        self.item = 0
+        self.quantity = 0
+
+
+
+[docs] +class FFSPMiner(_ab._fuzzySpatialFrequentPatterns): + """ + :Description: Fuzzy Frequent Spatial Pattern-Miner is desired to find all Spatially frequent fuzzy patterns + which is on-trivial and challenging problem to its huge search space.we are using efficient pruning + techniques to reduce the search space. + + :Reference: Reference: P. Veena, B. S. Chithra, R. U. Kiran, S. Agarwal and K. Zettsu, "Discovering Fuzzy Frequent + Spatial Patterns in Large Quantitative Spatiotemporal databases," 2021 IEEE International Conference on Fuzzy Systems + (FUZZ-IEEE), 2021, pp. 1-8, doi: 10.1109/FUZZ45933.2021.9494594. + + :param iFile: str : + Name of the Input file to mine complete set of frequent patterns + :param oFile: str : + Name of the output file to store complete set of frequent patterns + :param minSup: int or float or str : + The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup is expressed in count. Otherwise, it will be treated as float. + :param maxPer: float : + The user can specify maxPer in count or proportion of database size. If the program detects the data type of maxPer is integer, then it treats maxPer is expressed in count. + :param nFile: str : + Name of the input file to mine complete set of frequent patterns + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + + :Attributes: + + iFile : file + Name of the input file to mine complete set of fuzzy spatial frequent patterns + oFile : file + Name of the oFile file to store complete set of fuzzy spatial frequent patterns + minSup : float + The user given minimum support + neighbors : map + keep track of neighbours of elements + memoryRSS : float + To store the total amount of RSS memory consumed by the program + startTime : float + To record the start time of the mining process + endTime : float + To record the completion time of the mining process + itemsCnt : int + To record the number of fuzzy spatial itemSets generated + mapItemSum : map + To keep track of sum of Fuzzy Values of items + mapItemRegions : map + To Keep track of fuzzy regions of item + joinsCnt : int + To keep track of the number of FFI-list that was constructed + BufferSize : int + represent the size of Buffer + itemSetBuffer : list + to keep track of items in buffer + + :Methods: + + mine() + Mining process will start from here + getPatterns() + Complete set of patterns will be retrieved with this function + save(oFile) + Complete set of frequent patterns will be loaded in to a output file + getPatternsAsDataFrame() + Complete set of frequent patterns will be loaded in to a dataframe + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function + convert(value) + To convert the given user specified value + FSFIMining( prefix, prefixLen, fsFim, minSup) + Method generate FFI from prefix + construct(px, py) + A function to construct Fuzzy itemSet from 2 fuzzy itemSets + Intersection(neighbourX,neighbourY) + Return common neighbours of 2 itemSet Neighbours + findElementWithTID(uList, tid) + To find element with same tid as given + 
WriteOut(prefix, prefixLen, item, sumIUtil,period) + To Store the patten + + **Executing the code on terminal :** + ---------------------------------------- + + .. code-block:: console + + Format: + + (.venv) $ python3 FFSPMiner.py <inputFile> <outputFile> <neighbours> <minSup> <sep> + + Example Usage: + + (.venv) $ python3 FFSPMiner.py sampleTDB.txt output.txt sampleN.txt 3 + + .. note:: minSup will be considered in percentage of database transactions + + **Sample run of importing the code:** + ---------------------------------------- + .. code-block:: python + + from PAMI.fuzzyGeoreferencedFrequentPattern import FFSPMiner as alg + + obj = alg.FFSPMiner("input.txt", "neighbours.txt", 2) + + obj.mine() + + fuzzySpatialFrequentPatterns = obj.getPatterns() + + print("Total number of fuzzy frequent spatial patterns:", len(fuzzySpatialFrequentPatterns)) + + obj.save("outputFile") + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + **Credits:** + -------------- + The complete program was written by B.Sai Chitra under the supervision of Professor Rage Uday Kiran. + """ + + _startTime = float() + _endTime = float() + _minSup = str() + _maxPer = float() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _nFile = " " + _memoryUSS = float() + _memoryRSS = float() + _sep = "\t" + _transactions = [] + _fuzzyValues = [] + + def __init__(self, iFile: str, nFile: str, minSup: float, sep: str="\t") -> None: + super().__init__(iFile, nFile, minSup, sep) + self._mapItemNeighbours = {} + self._startTime = 0 + self._endTime = 0 + self._mapItemSum = {} + self._joinsCnt = 0 + self._BufferSize = 200 + self._itemSetBuffer = [] + self._finalPatterns = {} + self._dbLen = 0 + self._itemsCnt = 0 + + def _compareItems(self, o1, o2) -> int: + """ + A Function that sort all ffi-list in ascending order of Support + + :param o1: First FFI-list + :type o1: _FFList + :param o2: Second FFI-list + :type o2: _FFList + :return: Comparision Value + + :rtype: int + """ + compare = self._mapItemSum[o1.item] - self._mapItemSum[o2.item] + if compare == 0: + return 0 + else: + return compare + + def _convert(self, value) -> float: + """ + To convert the given user specified value + + :param value: user specified value + :type value: int or float or str + :return: converted value + :rtype: float + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (self._dbLen * value) + if type(value) is str: + if '.' 
in value:
                value = float(value)
                value = (self._dbLen * value)
            else:
                value = int(value)
        return value

    def _creatingItemSets(self) -> None:
        """
        Storing the complete transactions of the database/input file in a database variable

        :return: None
        """
        self._transactions, self._fuzzyValues = [], []
        if isinstance(self._iFile, _ab._pd.DataFrame):
            if self._iFile.empty:
                print("its empty..")
            i = self._iFile.columns.values.tolist()
            if 'Transactions' in i:
                self._transactions = self._iFile['Transactions'].tolist()
            if 'fuzzyValues' in i:
                self._fuzzyValues = self._iFile['Utilities'].tolist()

        if isinstance(self._iFile, str):
            if _ab._validators.url(self._iFile):
                data = _ab._urlopen(self._iFile)
                for line in data:
                    line = line.decode("utf-8")
                    line = line.split("\n")[0]
                    parts = line.split(":")
                    parts[0] = parts[0].strip()
                    parts[1] = parts[1].strip()
                    items = parts[0].split(self._sep)
                    quantities = parts[1].split(self._sep)
                    self._transactions.append([x for x in items])
                    self._fuzzyValues.append([float(x) for x in quantities])
            else:
                try:
                    with open(self._iFile, 'r', encoding='utf-8') as f:
                        for line in f:
                            line = line.split("\n")[0]
                            parts = line.split(":")
                            parts[0] = parts[0].strip()
                            parts[1] = parts[1].strip()
                            items = parts[0].split(self._sep)
                            quantities = parts[1].split(self._sep)
                            self._transactions.append([x for x in items])
                            self._fuzzyValues.append([float(x) for x in quantities])
                except IOError:
                    print("File Not Found")
                    quit()

    def _mapNeighbours(self) -> None:
        """
        Reads the neighbourhood file and maps every item to its list of neighbours

        :return: None
        """
        self._mapItemNeighbours = {}
        if isinstance(self._nFile, _ab._pd.DataFrame):
            data, items = [], []
            if self._nFile.empty:
                print("its empty..")
            i = self._nFile.columns.values.tolist()
            if 'items' in i:
                items = self._nFile['items'].tolist()
            if 'Neighbours' in i:
                data = self._nFile['Neighbours'].tolist()
            for k in range(len(items)):
                self._mapItemNeighbours[items[k]] = data[k]

        if isinstance(self._nFile, str):
            if _ab._validators.url(self._nFile):
                data = _ab._urlopen(self._nFile)
                for line in data:
                    line = line.decode("utf-8")
                    line = line.split("\n")[0]
                    parts = [i.rstrip() for i in line.split(self._sep)]
                    item = parts[0]
                    neigh1 = []
                    for i in range(1, len(parts)):
                        neigh1.append(parts[i])
                    self._mapItemNeighbours[item] = neigh1
            else:
                try:
                    with open(self._nFile, 'r', encoding='utf-8') as f:
                        for line in f:
                            line = line.split("\n")[0]
                            parts = [i.rstrip() for i in line.split(self._sep)]
                            item = parts[0]
                            neigh1 = []
                            for i in range(1, len(parts)):
                                neigh1.append(parts[i])
                            self._mapItemNeighbours[item] = neigh1
                except IOError:
                    print("File Not Found")
                    quit()
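    # Illustrative neighbourhood-file line for _mapNeighbours, where <TAB> is
    # the default separator and the item names are hypothetical:
    #
    #     a<TAB>b<TAB>c
    #
    # maps item 'a' to the neighbour list ['b', 'c'], which mine() later uses
    # to restrict remaining-utility sums and joins to spatial neighbours.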
+[docs] + @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.") + def startMine(self) -> None: + """ + Frequent pattern mining process will start from here + + :return: None + """ + self.mine()
+ + +
+[docs] + def mine(self) -> None: + """ + Frequent pattern mining process will start from here + + :return: None + """ + self._startTime = _ab._time.time() + self._creatingItemSets() + self._finalPatterns = {} + self._mapNeighbours() + for line in range(len(self._transactions)): + items = self._transactions[line] + quantities = self._fuzzyValues[line] + self._dbLen += 1 + for i in range(0, len(items)): + item = items[i] + if item in self._mapItemSum: + self._mapItemSum[item] += quantities[i] + else: + self._mapItemSum[item] = quantities[i] + listOfFFList = [] + mapItemsToFFLIST = {} + #self._minSup = self._convert(self._minSup) + for item1 in self._mapItemSum.keys(): + item = item1 + if self._mapItemSum[item] >= self._minSup: + fuList = _FFList(item) + mapItemsToFFLIST[item] = fuList + listOfFFList.append(fuList) + listOfFFList.sort(key=_ab._functools.cmp_to_key(self._compareItems)) + tid = 0 + for line in range(len(self._transactions)): + items = self._transactions[line] + quantities = self._fuzzyValues[line] + revisedTransaction = [] + for i in range(0, len(items)): + pair = _Pair() + pair.item = items[i] + pair.quantity = quantities[i] + item = pair.item + if self._mapItemSum[item] >= self._minSup: + if pair.quantity > 0: + revisedTransaction.append(pair) + revisedTransaction.sort(key=_ab._functools.cmp_to_key(self._compareItems)) + for i in range(len(revisedTransaction) - 1, -1, -1): + pair = revisedTransaction[i] + remainUtil = 0 + for j in range(len(revisedTransaction) - 1, i, -1): + if self._mapItemNeighbours.get(pair.item[0]) is None: + continue + if revisedTransaction[j].item[0] in self._mapItemNeighbours[pair.item[0]]: + remainUtil += revisedTransaction[j].quantity + remainingUtility = remainUtil + if mapItemsToFFLIST.get(pair.item) is not None: + FFListOfItem = mapItemsToFFLIST[pair.item] + element = _Element(tid, pair.quantity, remainingUtility) + FFListOfItem.addElement(element) + tid += 1 + itemNeighbours = list(self._mapItemNeighbours.keys()) + self._FSFIMining(self._itemSetBuffer, 0, listOfFFList, self._minSup, itemNeighbours) + self._endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = float() + self._memoryRSS = float() + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss
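+    # A minimal sketch (not part of the library) of the input format that
+    # _creatingItemSets() feeds into mine() above: one transaction per line,
+    # with the items and their fuzzy values separated by ':', e.g. with
+    # sep=' ' the line
+    #
+    #     a b c:0.4 0.7 0.2
+    #
+    # parses to the transaction ['a', 'b', 'c'] with fuzzy values [0.4, 0.7, 0.2].
+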
+ + + def _FSFIMining(self, prefix: List, prefixLen: int, FSFIM: List, minSup: float, itemNeighbours: List): + """ + Generates FFSPMiner from prefix + + :param prefix: the prefix patterns of FFSPMiner + :type prefix: len + :param prefixLen: the length of prefix + :type prefixLen: int + :param FSFIM: the Fuzzy list of prefix itemSets + :type FSFIM: list + :param minSup: the minimum support of + :type minSup: int + :param itemNeighbours: the set of common neighbours of prefix + :type itemNeighbours: list or set + """ + for i in range(0, len(FSFIM)): + X = FSFIM[i] + if X.sumIUtil >= minSup: + self._WriteOut(prefix, prefixLen, X.item, X.sumIUtil) + newNeighbours = self._Intersection(self._mapItemNeighbours.get(X.item[0]), itemNeighbours) + if X.sumRUtil >= minSup: + exULs = [] + for j in range(i + 1, len(FSFIM)): + Y = FSFIM[j] + if Y.item[0] in newNeighbours: + exULs.append(self._construct(X, Y)) + self._joinsCnt += 1 + self._itemSetBuffer.insert(prefixLen, X.item) + self._FSFIMining(self._itemSetBuffer, prefixLen + 1, exULs, minSup, newNeighbours) + + def _Intersection(self, neighbourX: List, neighbourY: List) -> List: + """ + A function to get common neighbours from 2 itemSets + + :param neighbourX: the set of neighbours of itemSet 1 + :type neighbourX: set or list + :param neighbourY: the set of neighbours of itemSet 2 + :type neighbourY: set or list + :return: set of common neighbours of 2 itemSets + :rtype: set + """ + result = [] + if neighbourX is None or neighbourY is None: + return result + for i in range(0, len(neighbourX)): + if neighbourX[i] in neighbourY: + result.append(neighbourX[i]) + return result + +
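+    # A minimal sketch (not part of the library) of _Intersection above: it
+    # keeps the order of neighbourX while filtering by membership in
+    # neighbourY, so _Intersection(['b', 'c', 'd'], ['d', 'b']) returns
+    # ['b', 'd'].
+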
+[docs] + def getMemoryUSS(self) -> float: + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self) -> float: + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self) -> float: + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + return self._endTime - self._startTime
+ + + def _construct(self, px: _FFList, py: _FFList) -> _FFList: + """ + A function to construct a new Fuzzy itemSet from 2 fuzzy itemSets + + :param px: the itemSet px + :type px: FFI-List + :param py: itemSet py + :type py: FFI-List + :return: the itemSet of pxy(px and py) + :rtype: FFI-List + """ + pxyUL = _FFList(py.item) + for ex in px.elements: + ey = self._findElementWithTID(py, ex.tid) + if ey is None: + continue + eXY = _Element(ex.tid, min([ex.iUtils, ey.iUtils], key=lambda x: float(x)), ey.rUtils) + pxyUL.addElement(eXY) + return pxyUL + + def _findElementWithTID(self, uList: _FFList, tid: int) -> _Element: + """ + To find element with same tid as given + + :param uList: fuzzyList + :type uList: FFI-List + :param tid: transaction id + :type tid: int + :return: element tid as given + :rtype: element if exist or None + """ + List = uList.elements + first = 0 + last = len(List) - 1 + while first <= last: + mid = (first + last) >> 1 + if List[mid].tid < tid: + first = mid + 1 + elif List[mid].tid > tid: + last = mid - 1 + else: + return List[mid] + return None + + def _WriteOut(self, prefix: List, prefixLen: int, item: int, sumIUtil: float) -> None: + """ + To Store the patten + + :param prefix: prefix of itemSet + :type prefix: list + :param prefixLen: length of prefix + :type prefixLen: int + :param item: the last item + :type item: int + :param sumIUtil: sum of utility of itemSet + :type sumIUtil: float + :return: None + """ + self._itemsCnt += 1 + res = "" + for i in range(0, prefixLen): + res += str(prefix[i]) + "\t" + res += str(item) + res1 = str(sumIUtil) + self._finalPatterns[res] = res1 + +
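+    # A minimal sketch (not part of the library) of the binary search in
+    # _findElementWithTID above: elements are appended in increasing tid
+    # order, so for element tids [1, 4, 7, 9] a lookup of tid 7 probes
+    # index 1 (tid 4, too small) and then index 2 (tid 7, match).
+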
+[docs] + def getPatternsAsDataFrame(self) -> _ab._pd.DataFrame: + """ + Storing final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataFrame = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a.replace('\t', ' '), b]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support']) + return dataFrame
+ + +
+[docs] + def getPatterns(self) -> Dict[str, str]: + """ + Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs]
+    def save(self, outFile: str) -> None:
+        """
+        Complete set of frequent patterns will be written to an output file
+
+        :param outFile: name of the output file
+        :type outFile: csv file
+        :return: None
+        """
+        self.oFile = outFile
+        # use a context manager so the output file is closed after writing
+        with open(self.oFile, 'w+') as writer:
+            for x, y in self._finalPatterns.items():
+                patternsAndSupport = x.strip() + " : " + str(y)
+                writer.write("%s \n" % patternsAndSupport)
+ + +
+[docs] + def printResults(self) -> None: + """ + This function is used to print the results + """ + print("Total number of Spatial Fuzzy Frequent Patterns:", len(self.getPatterns())) + print("Total Memory in USS:", self.getMemoryUSS()) + print("Total Memory in RSS", self.getMemoryRSS()) + print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_ab._sys.argv) == 5 or len(_ab._sys.argv) == 6:
+        if len(_ab._sys.argv) == 6:
+            _ap = FFSPMiner(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5])
+        if len(_ab._sys.argv) == 5:
+            _ap = FFSPMiner(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4])
+        _ap.mine()
+        print("Total number of Spatial Fuzzy Frequent Patterns:", len(_ap.getPatterns()))
+        _ap.save(_ab._sys.argv[2])
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        print("Error! The number of input parameters does not match the total number of parameters provided")
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/fuzzyGeoreferencedFrequentPattern/basic/FFSPMiner_old.html b/sphinx/_build/html/_modules/PAMI/fuzzyGeoreferencedFrequentPattern/basic/FFSPMiner_old.html new file mode 100644 index 000000000..f6d42f45a --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/fuzzyGeoreferencedFrequentPattern/basic/FFSPMiner_old.html @@ -0,0 +1,899 @@ + + + + + + PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner_old — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner_old

+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#     .. code-block:: python
+#
+#             from PAMI.fuzzyGeoreferencedFrequentPattern import FFSPMiner as alg
+#
+#             obj = alg.FFSPMiner("input.txt", "neighbours.txt", 2)
+#
+#             obj.mine()
+#
+#             fuzzySpatialFrequentPatterns = obj.getPatterns()
+#
+#             print("Total number of fuzzy frequent spatial patterns:", len(fuzzySpatialFrequentPatterns))
+#
+#             obj.save("outputFile")
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+     Copyright (C)  2021 Rage Uday Kiran
+
+"""
+
+from PAMI.fuzzyGeoreferencedFrequentPattern.basic import abstract as _ab
+from deprecated import deprecated
+
+
+class _FFList:
+    """
+    A class that represents the fuzzy list of an element
+
+    :Attributes:
+
+        item: int
+            the item name
+        sumIUtil: float
+            the sum of utilities of a fuzzy item in database
+        sumRUtil: float
+            the sum of resting values of a fuzzy item in database
+        elements: list
+            a list of elements containing the tid, utility, and resting value of the element in each transaction
+
+    :Methods:
+
+        addElement(element)
+            Method to add an element to this fuzzy list and update the sums at the same time.
+        printElement()
+            Method to print elements
+
+    """
+
+    def __init__(self, itemName):
+        self.item = itemName
+        self.sumIUtil = 0.0
+        self.sumRUtil = 0.0
+        self.elements = []
+
+    def addElement(self, element):
+        """
+        A method that adds a new element to the FFList
+
+        :param element: an element to be added to FFList
+        :type element: Element
+        """
+        self.sumIUtil += element.iUtils
+        self.sumRUtil += element.rUtils
+        self.elements.append(element)
+
+    def printElement(self):
+        """
+        A method to print the elements in the FFList
+        """
+        for ele in self.elements:
+            print(ele.tid, ele.iUtils, ele.rUtils)
+
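+# A minimal sketch (not part of the library) of the bookkeeping in _FFList;
+# the element values below are made up for illustration:
+#
+#     fl = _FFList('a')
+#     fl.addElement(_Element(0, 0.5, 1.2))
+#     fl.addElement(_Element(3, 0.4, 0.6))
+#     # fl.sumIUtil == 0.9, fl.sumRUtil == 1.8, len(fl.elements) == 2
+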
+
+class _Element:
+    """
+    A class that represents an element of a fuzzy list
+
+    :Attributes:
+
+        tid : int
+            keeps track of the transaction id
+        iUtils : float
+            the utility of a fuzzy item in the transaction
+        rUtils : float
+            the neighbourhood resting value of a fuzzy item in the transaction
+    """
+
+    def __init__(self, tid, iUtil, rUtil):
+        self.tid = tid
+        self.iUtils = iUtil
+        self.rUtils = rUtil
+
+
+class _Regions:
+    """
+    A class to calculate the fuzzy regions of a quantity
+
+    :Attributes:
+
+        low : float
+            low region membership value
+        middle : float
+            middle region membership value
+        high : float
+            high region membership value
+    """
+
+    def __init__(self, quantity, regionsNumber):
+        self.low = 0
+        self.middle = 0
+        self.high = 0
+        if regionsNumber == 3:  # if we have 3 regions
+            if 0 < quantity <= 1:
+                self.low = 1
+                self.high = 0
+                self.middle = 0
+            elif 1 < quantity <= 6:
+                self.low = float((6 - quantity) / 5)
+                self.middle = float((quantity - 1) / 5)
+                self.high = 0
+            elif 6 < quantity <= 11:
+                self.low = 0
+                self.middle = float((11 - quantity) / 5)
+                self.high = float((quantity - 6) / 5)
+            else:
+                self.low = 0
+                self.middle = 0
+                self.high = 1
+
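+# A minimal sketch (not part of the library) of the triangular membership
+# encoded by _Regions above, e.g. for a quantity of 4 with 3 regions:
+#
+#     r = _Regions(4, 3)
+#     # r.low == (6 - 4) / 5 == 0.4, r.middle == (4 - 1) / 5 == 0.6, r.high == 0
+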
+
+class _Pair:
+    """
+    A class to store an item and its quantity together
+    """
+
+    def __init__(self):
+        self.item = 0
+        self.quantity = 0
+
+
+
+[docs] +class FFSPMiner(_ab._fuzzySpatialFrequentPatterns): + """ + :Description: Fuzzy Frequent Spatial Pattern-Miner is desired to find all Spatially frequent fuzzy patterns + which is on-trivial and challenging problem to its huge search space.we are using efficient pruning + techniques to reduce the search space. + + Reference: Reference: P. Veena, B. S. Chithra, R. U. Kiran, S. Agarwal and K. Zettsu, "Discovering Fuzzy Frequent + Spatial Patterns in Large Quantitative Spatiotemporal databases," 2021 IEEE International Conference on Fuzzy Systems + (FUZZ-IEEE), 2021, pp. 1-8, doi: 10.1109/FUZZ45933.2021.9494594. + + :param iFile: str : + Name of the Input file to mine complete set of frequent patterns + :param oFile: str : + Name of the output file to store complete set of frequent patterns + :param minSup: int or float or str : + The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup is expressed in count. Otherwise, it will be treated as float. + :param maxPer: float : + The user can specify maxPer in count or proportion of database size. If the program detects the data type of maxPer is integer, then it treats maxPer is expressed in count. + :param nFile: str : + Name of the input file to mine complete set of frequent patterns + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + + :Attributes: + + iFile : file + Name of the input file to mine complete set of fuzzy spatial frequent patterns + oFile : file + Name of the oFile file to store complete set of fuzzy spatial frequent patterns + minSup : float + The user given minimum support + neighbors : map + keep track of neighbours of elements + memoryRSS : float + To store the total amount of RSS memory consumed by the program + startTime : float + To record the start time of the mining process + endTime : float + To record the completion time of the mining process + itemsCnt : int + To record the number of fuzzy spatial itemSets generated + mapItemsLowSum : map + To keep track of low region values of items + mapItemsMidSum : map + To keep track of middle region values of items + mapItemsHighSum : map + To keep track of high region values of items + mapItemSum : map + To keep track of sum of Fuzzy Values of items + mapItemRegions : map + To Keep track of fuzzy regions of item + joinsCnt : int + To keep track of the number of FFI-list that was constructed + BufferSize : int + represent the size of Buffer + itemSetBuffer : list + to keep track of items in buffer + + :Methods: + + mine() + Mining process will start from here + getPatterns() + Complete set of patterns will be retrieved with this function + save(oFile) + Complete set of frequent patterns will be loaded in to a output file + getPatternsAsDataFrame() + Complete set of frequent patterns will be loaded in to a dataframe + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function + convert(value) + To convert the given user specified value + FSFIMining( prefix, prefixLen, fsFim, minSup) + Method generate FFI from prefix + construct(px, py) + A function to construct 
Fuzzy itemSet from 2 fuzzy itemSets + Intersection(neighbourX,neighbourY) + Return common neighbours of 2 itemSet Neighbours + findElementWithTID(uList, tid) + To find element with same tid as given + WriteOut(prefix, prefixLen, item, sumIUtil,period) + To Store the patten + + **Executing the code on terminal :** + ---------------------------------------- + + .. code-block:: console + + Format: + + (.venv) $ python3 FFSPMiner_old.py <inputFile> <outputFile> <neighbours> <minSup> <sep> + + Example Usage: + + (.venv) $ python3 FFSPMiner_old.py sampleTDB.txt output.txt sampleN.txt 3 + + (.venv) $ python3 FFSPMiner_old.py sampleTDB.txt output.txt sampleN.txt 0.3 + + (.venv) $ python3 FFSPMiner_old.py sampleTDB.txt output.txt sampleN.txt 3 + + .. note:: minSup will be considered in percentage of database transactions + + + **Sample run of importing the code:** + ---------------------------------------- + + from PAMI.fuzzyGeoreferencedFrequentPattern import FFSPMiner as alg + + obj = alg.FFSPMiner("input.txt", "neighbours.txt", 2) + + obj.mine() + + fuzzySpatialFrequentPatterns = obj.getPatterns() + + print("Total number of fuzzy frequent spatial patterns:", len(fuzzySpatialFrequentPatterns)) + + obj.save("outputFile") + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + **Credits:** + --------------- + The complete program was written by B.Sai Chitra under the supervision of Professor Rage Uday Kiran. + """ + + _startTime = float() + _endTime = float() + _minSup = str() + _maxPer = float() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _nFile = " " + _memoryUSS = float() + _memoryRSS = float() + _sep = "\t" + _transactions = [] + _fuzzyValues = [] + + def __init__(self, iFile, nFile, minSup, sep="\t"): + super().__init__(iFile, nFile, minSup, sep) + self._mapItemNeighbours = {} + self._startTime = 0 + self._endTime = 0 + self._itemsCnt = 0 + self._mapItemsLowSum = {} + self._mapItemsMidSum = {} + self._mapItemsHighSum = {} + self._mapItemSum = {} + self._mapItemRegions = {} + self._joinsCnt = 0 + self._BufferSize = 200 + self._itemSetBuffer = [] + self._finalPatterns = {} + self._dbLen = 0 + + def _compareItems(self, o1, o2): + """ + A Function that sort all FFI-list in ascending order of Support + """ + compare = self._mapItemSum[o1.item] - self._mapItemSum[o2.item] + if compare == 0: + return int(o1.item) - int(o2.item) + else: + return compare + + def _convert(self, value): + """ + To convert the given user specified value + + :param value: user specified value + :type value: int or float or str + :return: converted value + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (self._dbLen * value) + if type(value) is str: + if '.' 
in value: + value = (self._dbLen * value) + else: + value = int(value) + return value + + def _creatingItemSets(self): + """ + Storing the complete transactions of the database/input file in a database variable + """ + self._transactions, self._fuzzyValues = [], [] + if isinstance(self._iFile, _ab._pd.DataFrame): + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'Transactions' in i: + self.transactions = self._iFile['Transactions'].tolist() + if 'fuzzyValues' in i: + self.fuzzyValues = self._iFile['Utilities'].tolist() + + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line = line.decode("utf-8") + line = line.split("\n")[0] + parts = line.split(":") + parts[0] = parts[0].strip() + parts[2] = parts[2].strip() + items = parts[0].split(self._sep) + quantities = parts[2].split(self._sep) + self.transactions.append([x for x in items]) + self.fuzzyValues.append([x for x in quantities]) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line = line.split("\n")[0] + parts = line.split(":") + parts[0] = parts[0].strip() + parts[2] = parts[2].strip() + items = parts[0].split(self._sep) + quantities = parts[2].split(self._sep) + self._transactions.append([x for x in items]) + self._fuzzyValues.append([x for x in quantities]) + except IOError: + print("File Not Found") + quit() + + def _mapNeighbours(self): + self._mapItemNeighbours = {} + if isinstance(self._nFile, _ab._pd.DataFrame): + data, items = [], [] + if self._nFile.empty: + print("its empty..") + i = self._nFile.columns.values.tolist() + if 'items' in i: + items = self._nFile['items'].tolist() + if 'Neighbours' in i: + data = self._nFile['Neighbours'].tolist() + for k in range(len(items)): + self._mapItemNeighbours[items[k]] = data[k] + + if isinstance(self._nFile, str): + if _ab._validators.url(self._nFile): + data = _ab._urlopen(self._nFile) + for line in data: + line = line.decode("utf-8") + line = line.split("\n")[0] + parts = [i.rstrip() for i in line.split(self._sep)] + parts = [x for x in parts] + item = parts[0] + neigh1 = [] + for i in range(1, len(parts)): + neigh1.append(parts[i]) + self._mapItemNeighbours[item] = neigh1 + else: + try: + with open(self._nFile, 'r', encoding='utf-8') as f: + for line in f: + line = line.split("\n")[0] + parts = [i.rstrip() for i in line.split(self._sep)] + parts = [x for x in parts] + item = parts[0] + neigh1 = [] + for i in range(1, len(parts)): + neigh1.append(parts[i]) + self._mapItemNeighbours[item] = neigh1 + except IOError: + print("File Not Found") + quit() + +
+[docs] + @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.") + def startMine(self): + """ + Frequent pattern mining process will start from here + """ + self.mine()
+ + +
+[docs] + def mine(self): + """ + Frequent pattern mining process will start from here + """ + self._startTime = _ab._time.time() + self._creatingItemSets() + self._finalPatterns = {} + self._mapNeighbours() + for line in range(len(self._transactions)): + items = self._transactions[line] + quantities = self._fuzzyValues[line] + self._dbLen += 1 + for i in range(0, len(items)): + regions = _Regions(int(quantities[i]), 3) + item = items[i] + if item in self._mapItemsLowSum.keys(): + low = self._mapItemsLowSum[item] + low += regions.low + self._mapItemsLowSum[item] = low + else: + self._mapItemsLowSum[item] = regions.low + if item in self._mapItemsMidSum.keys(): + mid = self._mapItemsMidSum[item] + mid += regions.middle + self._mapItemsMidSum[item] = mid + else: + self._mapItemsMidSum[item] = regions.middle + if item in self._mapItemsHighSum.keys(): + high = self._mapItemsHighSum[item] + high += regions.high + self._mapItemsHighSum[item] = high + else: + self._mapItemsHighSum[item] = regions.high + listOfFFList = [] + mapItemsToFFLIST = {} + self._minSup = self._convert(self._minSup) + for item1 in self._mapItemsLowSum.keys(): + item = item1 + low = self._mapItemsLowSum[item] + mid = self._mapItemsMidSum[item] + high = self._mapItemsHighSum[item] + if low >= mid and low >= high: + self._mapItemSum[item] = low + self._mapItemRegions[item] = "L" + elif mid >= low and mid >= high: + self._mapItemSum[item] = mid + self._mapItemRegions[item] = "M" + elif high >= low and high >= mid: + self._mapItemRegions[item] = "H" + self._mapItemSum[item] = high + if self._mapItemSum[item] >= self._minSup: + fuList = _FFList(item) + mapItemsToFFLIST[item] = fuList + listOfFFList.append(fuList) + listOfFFList.sort(key=_ab._functools.cmp_to_key(self._compareItems)) + tid = 0 + for line in range(len(self._transactions)): + items = self._transactions[line] + quantities = self._fuzzyValues[line] + revisedTransaction = [] + for i in range(0, len(items)): + pair = _Pair() + pair.item = items[i] + regions = _Regions(int(quantities[i]), 3) + item = pair.item + if self._mapItemSum[item] >= self._minSup: + if self._mapItemRegions[pair.item] == "L": + pair.quantity = regions.low + elif self._mapItemRegions[pair.item] == "M": + pair.quantity = regions.middle + elif self._mapItemRegions[pair.item] == "H": + pair.quantity = regions.high + if pair.quantity > 0: + revisedTransaction.append(pair) + revisedTransaction.sort(key=_ab._functools.cmp_to_key(self._compareItems)) + for i in range(len(revisedTransaction) - 1, -1, -1): + pair = revisedTransaction[i] + remainUtil = 0 + for j in range(len(revisedTransaction) - 1, i, -1): + if self._mapItemNeighbours.get(pair.item) is None: + continue + if revisedTransaction[j].item in self._mapItemNeighbours[pair.item]: + remainUtil += revisedTransaction[j].quantity + remainingUtility = remainUtil + if mapItemsToFFLIST.get(pair.item) is not None: + FFListOfItem = mapItemsToFFLIST[pair.item] + element = _Element(tid, pair.quantity, remainingUtility) + FFListOfItem.addElement(element) + tid += 1 + itemNeighbours = list(self._mapItemNeighbours.keys()) + self._FSFIMining(self._itemSetBuffer, 0, listOfFFList, self._minSup, itemNeighbours) + self._endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = float() + self._memoryRSS = float() + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss
+ + + def _FSFIMining(self, prefix, prefixLen, FSFIM, minSup, itemNeighbours): + """ + Generates FFSPMiner from prefix + + :param prefix: the prefix patterns of FFSPMiner + :type prefix: len + :param prefixLen: the length of prefix + :type prefixLen: int + :param FSFIM: the Fuzzy list of prefix itemSets + :type FSFIM: list + :param minSup: the minimum support of + :type minSup: int + :param itemNeighbours: the set of common neighbours of prefix + :type itemNeighbours: list or set + """ + for i in range(0, len(FSFIM)): + X = FSFIM[i] + if X.sumIUtil >= minSup: + self._WriteOut(prefix, prefixLen, X.item, X.sumIUtil) + newNeighbours = self._Intersection(self._mapItemNeighbours.get(X.item), itemNeighbours) + if X.sumRUtil >= minSup: + exULs = [] + for j in range(i + 1, len(FSFIM)): + Y = FSFIM[j] + if Y.item in newNeighbours: + exULs.append(self._construct(X, Y)) + self._joinsCnt += 1 + self._itemSetBuffer.insert(prefixLen, X.item) + self._FSFIMining(self._itemSetBuffer, prefixLen + 1, exULs, minSup, newNeighbours) + + def _Intersection(self, neighbourX, neighbourY): + """ + A function to get common neighbours from 2 itemSets + + :param neighbourX: the set of neighbours of itemSet 1 + :type neighbourX: set or list + :param neighbourY: the set of neighbours of itemSet 2 + :type neighbourY: set or list + :return: set of common neighbours of 2 itemSets + :rtype: set + """ + result = [] + if neighbourX is None or neighbourY is None: + return result + for i in range(0, len(neighbourX)): + if neighbourX[i] in neighbourY: + result.append(neighbourX[i]) + return result + +
+[docs] + def getMemoryUSS(self): + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self): + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self): + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + return self._endTime - self._startTime
+ + + def _construct(self, px, py): + """ + A function to construct a new Fuzzy itemSet from 2 fuzzy itemSets + + :param px: the itemSet px + :type px: FFI-List + :param py: itemSet py + :type py: FFI-List + :return: the itemSet of pxy(px and py) + :rtype: FFI-List + """ + pxyUL = _FFList(py.item) + for ex in px.elements: + ey = self._findElementWithTID(py, ex.tid) + if ey is None: + continue + eXY = _Element(ex.tid, min([ex.iUtils, ey.iUtils], key=lambda x: float(x)), ey.rUtils) + pxyUL.addElement(eXY) + return pxyUL + + def _findElementWithTID(self, uList, tid): + """ + To find element with same tid as given + + :param uList: fuzzyList + :type uList: FFI-List + :param tid: transaction id + :type tid: int + :return: element tid as given + :rtype: element if exist or None + """ + List = uList.elements + first = 0 + last = len(List) - 1 + while first <= last: + mid = (first + last) >> 1 + if List[mid].tid < tid: + first = mid + 1 + elif List[mid].tid > tid: + last = mid - 1 + else: + return List[mid] + return None + + def _WriteOut(self, prefix, prefixLen, item, sumIUtil): + """ + To Store the patten + + :param prefix: prefix of itemSet + :type prefix: list + :param prefixLen: length of prefix + :type prefixLen: int + :param item: the last item + :type item: int + :param sumIUtil: sum of utility of itemSet + :type sumIUtil: float + """ + self._itemsCnt += 1 + res = "" + for i in range(0, prefixLen): + res += str(prefix[i]) + "." + str(self._mapItemRegions[prefix[i]]) + "\t" + res += str(item) + "." + str(self._mapItemRegions.get(item)) + res1 = str(sumIUtil) + self._finalPatterns[res] = res1 + +
+[docs] + def getPatternsAsDataFrame(self): + """ + Storing final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataFrame = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a.replace('\t', ' '), b]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support']) + return dataFrame
+ + +
+[docs] + def getPatterns(self): + """ + Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs]
+    def save(self, outFile):
+        """
+        Complete set of frequent patterns will be written to an output file
+
+        :param outFile: name of the output file
+        :type outFile: csv file
+        """
+        self.oFile = outFile
+        # use a context manager so the output file is closed after writing
+        with open(self.oFile, 'w+') as writer:
+            for x, y in self._finalPatterns.items():
+                patternsAndSupport = x.strip() + " : " + str(y)
+                writer.write("%s \n" % patternsAndSupport)
+ + +
+[docs] + def printResults(self): + """ + This function is used to print the results + """ + print("Total number of Spatial Fuzzy Frequent Patterns:", len(self.getPatterns())) + print("Total Memory in USS:", self.getMemoryUSS()) + print("Total Memory in RSS", self.getMemoryRSS()) + print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_ab._sys.argv) == 5 or len(_ab._sys.argv) == 6:
+        if len(_ab._sys.argv) == 6:
+            _ap = FFSPMiner(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5])
+        if len(_ab._sys.argv) == 5:
+            _ap = FFSPMiner(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4])
+        _ap.mine()
+        print("Total number of Spatial Fuzzy Frequent Patterns:", len(_ap.getPatterns()))
+        _ap.save(_ab._sys.argv[2])
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        print("Error! The number of input parameters does not match the total number of parameters provided")
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/fuzzyGeoreferencedPeriodicFrequentPattern/basic/FGPFPMiner.html b/sphinx/_build/html/_modules/PAMI/fuzzyGeoreferencedPeriodicFrequentPattern/basic/FGPFPMiner.html new file mode 100644 index 000000000..5b3e361f8 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/fuzzyGeoreferencedPeriodicFrequentPattern/basic/FGPFPMiner.html @@ -0,0 +1,882 @@ + + + + + + PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner

+# Fuzzy Frequent Spatial Pattern-Miner is designed to find all spatially frequent fuzzy patterns,
+# which is a non-trivial and challenging problem owing to its huge search space. We use efficient
+# pruning techniques to reduce the search space.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#     .. code-block:: python
+#
+#             from PAMI.fuzzyGeoreferencedPeriodicFrequentPattern import FGPFPMiner as alg
+#
+#             obj = alg.FGPFPMiner("input.txt", "neighbours.txt", 3, 4)
+#
+#             obj.mine()
+#
+#             print("Total number of fuzzy frequent spatial patterns:", len(obj.getPatterns()))
+#
+#             obj.save("outputFile")
+#
+#             print("Total Memory in USS:", obj.getMemoryUSS())
+#
+#             print("Total Memory in RSS", obj.getMemoryRSS())
+#
+#             print("Total ExecutionTime in seconds:", obj.getRuntime())
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+     Copyright (C)  2021 Rage Uday Kiran
+
+"""
+
+
+import PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.abstract as _ab
+from deprecated import deprecated
+
+
+class _FFList:
+    """
+    A class that represents the fuzzy list of an element
+
+    :Attributes:
+
+         item : int
+             the item name
+         sumIUtil : float
+             the sum of utilities of a fuzzy item in database
+         sumRUtil : float
+             the sum of resting values of a fuzzy item in database
+         elements : list
+             a list of elements containing the tid, utility, and resting value of the element in each transaction
+
+    :Methods:
+
+        addElement(element)
+            Method to add an element to this fuzzy list and update the sums at the same time.
+        printElement()
+            Method to print elements
+    """
+
+    def __init__(self, itemName):
+        self.item = itemName
+        self.isPeriodic = False
+        self.sumIUtil = 0.0
+        self.sumRUtil = 0.0
+        self.elements = []
+
+    def addElement(self, element):
+        """
+        A method that adds a new element to the FFList
+
+        :param element: an element to be added to FFList
+        :type element: Element
+        """
+        self.sumIUtil += element.iUtils
+        self.sumRUtil += element.rUtils
+        self.elements.append(element)
+
+    def printElement(self):
+        """
+        A method to print the elements in the FFList object
+        """
+        for ele in self.elements:
+            print(ele.tid, ele.iUtils, ele.rUtils)
+
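+# A minimal sketch (not part of the library): this variant of _FFList also
+# carries an isPeriodic flag, which the miner sets once it has checked that
+# every gap between consecutive element tids stays within maxPer:
+#
+#     fl = _FFList('a')      # fl.isPeriodic starts out False
+#     fl.addElement(_Element(0, 0.5, 1.0))
+#     fl.addElement(_Element(2, 0.3, 0.4))
+#     fl.isPeriodic = True   # set by the caller after the periodicity check
+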
+
+class _Element:
+    """
+    A class that represents an element of a fuzzy list
+
+    :Attributes:
+
+        tid : int
+            keeps track of the transaction id
+        iUtils : float
+            the utility of a fuzzy item in the transaction
+        rUtils : float
+            the neighbourhood resting value of a fuzzy item in the transaction
+    """
+
+    def __init__(self, tid, iUtil, rUtil):
+        self.tid = tid
+        self.iUtils = iUtil
+        self.rUtils = rUtil
+
+
+class _Pair:
+    """
+    A class to store an item and its quantity together
+    """
+
+    def __init__(self):
+        self.item = 0
+        self.quantity = 0
+
+
+
+[docs] +class FGPFPMiner(_ab._fuzzySpatialFrequentPatterns): + """ + :Description: Fuzzy Frequent Spatial Pattern-Miner is desired to find all Spatially frequent fuzzy patterns + which is on-trivial and challenging problem to its huge search space.we are using efficient pruning + techniques to reduce the search space. + + :Reference: + + :param iFile: str : + Name of the Input file to mine complete set of frequent patterns + :param oFile: str : + Name of the output file to store complete set of frequent patterns + :param minSup: int or float or str : + The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup is expressed in count. Otherwise, it will be treated as float. + :param maxPer: float : + The user can specify maxPer in count or proportion of database size. If the program detects the data type of maxPer is integer, then it treats maxPer is expressed in count. + :param nFile: str : + Name of the input file to mine complete set of frequent patterns + :param FuzFile: str : + The user can specify fuzFile. + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + + :Attributes: + + iFile : file + Name of the input file to mine complete set of fuzzy spatial frequent patterns + oFile : file + Name of the oFile file to store complete set of fuzzy spatial frequent patterns + minSup : float + The user given minimum support + neighbors : map + keep track of neighbours of elements + memoryRSS : float + To store the total amount of RSS memory consumed by the program + startTime : float + To record the start time of the mining process + endTime : float + To record the completion time of the mining process + itemsCnt : int + To record the number of fuzzy spatial itemSets generated + mapItemSum : map + To keep track of sum of Fuzzy Values of items + joinsCnt : int + To keep track of the number of FFI-list that was constructed + BufferSize : int + represent the size of Buffer + itemSetBuffer list + to keep track of items in buffer + + :Methods: + + mine() + Mining process will start from here + getPatterns() + Complete set of patterns will be retrieved with this function + save(oFile) + Complete set of frequent patterns will be loaded in to a output file + getPatternsAsDataFrame() + Complete set of frequent patterns will be loaded in to a dataframe + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function + convert(value) + To convert the given user specified value + FSFIMining( prefix, prefixLen, fsFim, minSup) + Method generate FFI from prefix + construct(px, py) + A function to construct Fuzzy itemSet from 2 fuzzy itemSets + Intersection(neighbourX,neighbourY) + Return common neighbours of 2 itemSet Neighbours + findElementWithTID(uList, tid) + To find element with same tid as given + WriteOut(prefix, prefixLen, item, sumIUtil,period) + To Store the patten + + **Executing the code on terminal :** + -------------------------------------------- + + .. 
code-block:: console + + Format: + + (.venv) $ python3 FGPFPMiner.py <inputFile> <outputFile> <neighbours> <minSup> <maxPer> <sep> + + Example Usage: + + (.venv) $ python3 FGPFPMiner.py sampleTDB.txt output.txt sampleN.txt 3 4 + + .. note:: minSup will be considered in percentage of database transactions + + + **Sample run of importing the code:** + ---------------------------------------- + .. code-block:: python + + from PAMI.fuzzyGeoreferencedPeriodicFrequentPattern import FGPFPMiner as alg + + obj = alg.FFSPMiner("input.txt", "neighbours.txt", 3, 4) + + obj.mine() + + print("Total number of fuzzy frequent spatial patterns:", len(obj.getPatterns())) + + obj.save("outputFile") + + print("Total Memory in USS:", obj.getMemoryUSS()) + + print("Total Memory in RSS", obj.getMemoryRSS()) + + print("Total ExecutionTime in seconds:", obj.getRuntime()) + + **Credits:** + ---------------- + The complete program was written by B.Sai Chitra and Kundai Kwangwari under the supervision of Professor Rage Uday Kiran. + """ + + _startTime = float() + _endTime = float() + _minSup = str() + _maxPer = float() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _nFile = " " + _FuzFile = " " + _memoryUSS = float() + _memoryRSS = float() + _sep = "\t" + _transactionsDB = [] + _fuzzyValuesDB = [] + _ts = [] + + def __init__(self, iFile, nFile, minSup, maxPer, sep): + super().__init__(iFile, nFile, minSup, maxPer, sep) + self._mapItemNeighbours = {} + self._startTime = 0 + self._endTime = 0 + self._itemsCnt = 0 + self._itemSupData = {} + self._mapItemSum = {} + self._joinsCnt = 0 + self._BufferSize = 200 + self._itemSetBuffer = [] + self._finalPatterns = {} + self._finalPeriodicPatterns = {} + self._tidList = {} + self._dbLen = 0 + + def _compareItems(self, o1, o2) -> int: + """ + A Function that sort all FFI-list in ascending order of Support + + :param o1: First FFI-list + :type o1: _FFList + :param o2: Second FFI-list + :type o2: _FFList + :return: Comparison Value + :rtype: int + """ + compare = self._mapItemSum[o1.item] - self._mapItemSum[o2.item] + if compare == 0: + return int(o1.item) - int(o2.item) + else: + return compare + + def _convert(self, value) -> float: + """ + To convert the given user specified value + + :param value: user specified value + :type value: int or float or str + :return: converted value + :rtype: float + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (self._dbLen * value) + if type(value) is str: + if '.' 
in value: + value = float(value) + else: + value = int(value) + return value + + def _creatingItemSets(self): + """ + Storing the complete transactions of the database/input file in a database variable + """ + self._transactionsDB, self._fuzzyValuesDB, self._ts = [], [], [] + if isinstance(self._iFile, _ab._pd.DataFrame): + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'Transactions' in i: + self._transactionsDB = self._iFile['Transactions'].tolist() + if 'fuzzyValues' in i: + self._fuzzyValuesDB = self._iFile['fuzzyValues'].tolist() + + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line = line.decode("utf-8") + line = line.split("\n")[0] + parts = line.split(":") + items = parts[0].split(self._sep) + quantities = parts[1].split(self._sep) + self._ts.append(int(items[0])) + self._transactionsDB.append([x for x in items[1:]]) + self._fuzzyValuesDB.append([float(x) for x in quantities]) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line = line.split("\n")[0] + parts = line.split(":") + parts[0] = parts[0].strip() + parts[1] = parts[1].strip() + items = parts[0].split(self._sep) + quantities = parts[1].split(self._sep) + self._ts.append(int(items[0])) + self._transactionsDB.append([x for x in items[1:]]) + self._fuzzyValuesDB.append([float(x) for x in quantities]) + except IOError: + print("File Not Found") + quit() + + def _mapNeighbours(self): + """ + A function to map items to their Neighbours + """ + self._mapItemNeighbours = {} + if isinstance(self._nFile, _ab._pd.DataFrame): + data, items = [], [] + if self._nFile.empty: + print("its empty..") + i = self._nFile.columns.values.tolist() + if 'items' in i: + items = self._nFile['items'].tolist() + if 'Neighbours' in i: + data = self._nFile['Neighbours'].tolist() + for k in range(len(items)): + self._mapItemNeighbours[items[k]] = data[k] + + if isinstance(self._nFile, str): + if _ab._validators.url(self._nFile): + data = _ab._urlopen(self._nFile) + for line in data: + line = line.decode("utf-8") + line = line.split("\n")[0] + parts = [i.rstrip() for i in line.split(self._sep)] + parts = [x for x in parts] + item = parts[0] + neigh1 = [] + for i in range(1, len(parts)): + neigh1.append(parts[i]) + self._mapItemNeighbours[item] = neigh1 + else: + try: + with open(self._nFile, 'r', encoding='utf-8') as f: + for line in f: + line = line.split("\n")[0] + parts = [i.rstrip() for i in line.split(self._sep)] + parts = [x for x in parts] + item = parts[0] + neigh1 = [] + for i in range(1, len(parts)): + neigh1.append(parts[i]) + self._mapItemNeighbours[item] = neigh1 + except IOError: + print(self._nFile) + print("File Not Found") + quit() + +
+[docs] + @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.") + def startMine(self): + """ + Frequent pattern mining process will start from here + """ + self.mine()
+ + +
+[docs] + def mine(self): + """ + Frequent pattern mining process will start from here + """ + self._startTime = _ab._time.time() + self._mapNeighbours() + self._creatingItemSets() + self._finalPatterns = {} + recent_occur = {} + for line in range(len(self._transactionsDB)): + item_list = self._transactionsDB[line] + fuzzyValues_list = self._fuzzyValuesDB[line] + ts = self._ts[line] + self._dbLen += 1 + """ + The section below is for: + 1.Finding the support of each item's region in the entire database + 2.Finding the periodic patterns of the data + 3.Trimming off the patterns whose support is less than minSupport + """ + for i in range(0, len(item_list)): + item = item_list[i] + if item in self._tidList: + self._tidList[item].append(ts - recent_occur[item][-1]) + recent_occur[item].append(ts) + else: + self._tidList[item] = [ts] + recent_occur[item] = [ts] + fuzzy_ref = fuzzyValues_list[i] + if item[0] in self._mapItemNeighbours: + if item in self._itemSupData.keys(): + self._itemSupData[item] += fuzzy_ref + else: + self._itemSupData[item] = fuzzy_ref + for item in self._tidList.keys(): + self._tidList[item].append(len(self._transactionsDB) - recent_occur[item][-1]) + del recent_occur + """ + Using Maximum Scalar Cardinality Value strategy to narrow down search space and generate candidate fuzzy periodic-frequent items. + Step1. Identify the regional representative (region with max support). This is the representative that will be tested to see if its greater than given minSup + Step2. prune out all items whose regional support is less than the given minSup + Step3. At the end, sort the list of stored Candidate Frequent-Periodic Patterns in ascending order + """ + + listOfFFList = [] + mapItemsToFFLIST = {} + region_label = [] + #self._minSup = self._convert(self._minSup) + for item in self._itemSupData.keys(): + if self._itemSupData[item] >= self._minSup: + self._mapItemSum[item] = self._itemSupData[item] + fuList = _FFList(item) + if int(self._maxPer) >= max(self._tidList[item]): + fuList.isPeriodic = True + mapItemsToFFLIST[item] = fuList + listOfFFList.append(fuList) + del self._itemSupData + del self._tidList + listOfFFList.sort(key=_ab._functools.cmp_to_key(self._compareItems)) + tid = 0 + for j in range(len(self._transactionsDB)): + item_list = list(set(self._transactionsDB[j]).intersection(set(self._mapItemSum.keys()))) + fuzzy_list = [self._fuzzyValuesDB[j][i] for i in range(len(self._fuzzyValuesDB[j])) if self._transactionsDB[j][i] in self._mapItemSum.keys()] + revisedTransaction = [] + for i in range(0, len(item_list)): + pair = _Pair() + pair.item = item_list[i] + fuzzy_ref = fuzzy_list[i] + pair.quantity = fuzzy_ref + if pair.quantity > 0: + revisedTransaction.append(pair) + revisedTransaction.sort(key=_ab._functools.cmp_to_key(self._compareItems)) + qaunt = {} + for i in range(len(revisedTransaction) - 1, -1, -1): + pair = revisedTransaction[i] + qaunt[pair.item[0]] = pair.quantity + remainUtil = 0 + temp = list(set(self._mapItemNeighbours[pair.item[0]]).intersection(set(qaunt.keys()))) + # print(temp, self._mapItemNeighbours[pair.item[0]], qaunt) + for j in temp: + remainUtil += float(qaunt[j]) + del temp + remainingUtility = remainUtil + FFListObject = mapItemsToFFLIST[pair.item] + element = _Element(tid, pair.quantity, remainingUtility) + FFListObject.addElement(element) + del qaunt + tid += 1 + itemNeighbours = list(self._mapItemNeighbours.keys()) + self._FSFIMining(self._itemSetBuffer, 0, listOfFFList, self._minSup, itemNeighbours) + self._endTime = _ab._time.time() + 
process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = float() + self._memoryRSS = float() + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss
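+    # A minimal sketch (not part of the library) of the period bookkeeping in
+    # mine() above: for an item seen at timestamps 1, 3 and 7 in a database of
+    # 10 transactions, _tidList ends up holding [1, 2, 4, 3] (the first
+    # occurrence, the two inter-arrival gaps, and the tail gap 10 - 7); the
+    # item can only be flagged periodic if the maximum of these gaps is at
+    # most maxPer.
+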
+ + + def _FSFIMining(self, prefix, prefixLen, FSFIM, minSup, itemNeighbours): + """ + Generates FFSPMiner from prefix + + :param prefix: the prefix patterns of FFSPMiner + :type prefix: len + :param prefixLen: the length of prefix + :type prefixLen: int + :param FSFIM: the Fuzzy list of prefix itemSets + :type FSFIM: list + :param minSup: the minimum support of + :type minSup:int + :param itemNeighbours: the set of common neighbours of prefix + :type itemNeighbours: list or set + """ + for i in range(0, len(FSFIM)): + _FFListObject1 = FSFIM[i] + if _FFListObject1.sumIUtil >= minSup: + self._WriteOut(prefix, prefixLen, _FFListObject1, _FFListObject1.sumIUtil) + newNeighbourList = self._Intersection(self._mapItemNeighbours.get(_FFListObject1.item[0]), itemNeighbours) + if _FFListObject1.sumRUtil >= minSup: + exULs = [] + for j in range(i + 1, len(FSFIM)): + _FFListObject2 = FSFIM[j] + if _FFListObject2.item in newNeighbourList: + exULs.append(self._construct(_FFListObject1, _FFListObject2)) + self._joinsCnt += 1 + self._itemSetBuffer.insert(prefixLen, _FFListObject1.item) + self._FSFIMining(self._itemSetBuffer, prefixLen + 1, exULs, minSup, newNeighbourList) + + def _Intersection(self, neighbourX, neighbourY): + """ + A function to get common neighbours from 2 itemSets + + :param neighbourX: the set of neighbours of itemSet 1 + :type neighbourX: set or list + :param neighbourY: the set of neighbours of itemSet 2 + :type neighbourY: set or list + :return : set of common neighbours of 2 itemSets + :rtype :set + """ + result = [] + if neighbourX is None or neighbourY is None: + return result + for i in range(0, len(neighbourX)): + if neighbourX[i] in neighbourY: + result.append(neighbourX[i]) + return result + +
+[docs] + def getMemoryUSS(self): + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self): + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self): + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + return self._endTime - self._startTime
+ + + def _construct(self, _FFListObject1, _FFListObject2): + """ + A function to construct a new Fuzzy itemSet from 2 fuzzy itemSets + + :param _FFListObject1:the itemSet px + :type _FFListObject1:FFI-List + :param _FFListObject2:itemSet py + :type _FFListObject2:FFI-List + :return :the itemSet of pxy(px and py) + :rtype :FFI-List + """ + recent_occur, first_occur, tid = 0, 0, 0 + periodlist = [] + _newFFListObject = _FFList(_FFListObject2.item) + for Ob1Element in _FFListObject1.elements: + Ob2Element = self._findElementWithTID(_FFListObject2, Ob1Element.tid) + if Ob2Element is None: + continue + tid = Ob1Element.tid + if len(periodlist) == 0: + periodlist.append(abs(first_occur - tid)) + recent_occur = tid + else: + periodlist.append(tid - recent_occur) + recent_occur = tid + newElement = _Element(Ob1Element.tid, min([Ob1Element.iUtils, Ob2Element.iUtils], key=lambda x: float(x)), + Ob2Element.rUtils) + _newFFListObject.addElement(newElement) + + if periodlist and int(self._maxPer) >= max(periodlist): + _newFFListObject.isPeriodic = True + else: + _newFFListObject.isPeriodic = False + return _newFFListObject + + def _findElementWithTID(self, uList, tid): + """ + To find element with same tid as given + + :param uList:fuzzyList + :type uList:FFI-List + :param tid:transaction id + :type tid:int + :return:element tid as given + :rtype: element if exist or None + """ + List = uList.elements + first = 0 + last = len(List) - 1 + while first <= last: + mid = (first + last) >> 1 + if List[mid].tid < tid: + first = mid + 1 + elif List[mid].tid > tid: + last = mid - 1 + else: + return List[mid] + return None + + def _WriteOut(self, prefix, prefixLen, _FFListObject, sumIUtil): + """ + To Store the patten + + :param prefix: prefix of itemSet + :type prefix: list + :param prefixLen: length of prefix + :type prefixLen: int + :param item: the last item + :type item: int + :param sumIUtil: sum of utility of itemSet + :type sumIUtil: float + """ + item = _FFListObject.item + self._itemsCnt += 1 + res = "" + for i in range(0, prefixLen): + res += str(prefix[i]) + "\t" + res += str(item) + res1 = str(sumIUtil) + self._finalPatterns[res] = res1 + + if _FFListObject.isPeriodic: + self._finalPeriodicPatterns[res] = res1 + +
+[docs] + def getPatternsAsDataFrame(self): + """ + Storing final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataFrame = {} + data = [] + for a, b in self._finalPeriodicPatterns.items(): + data.append([a.replace('\t', ' '), b]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support']) + return dataFrame
+ + +
+[docs] + def getPatterns(self): + """ + Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + :rtype: dict + """ + return self._finalPeriodicPatterns
+ + +
+[docs]
+    def save(self, outFile):
+        """
+        Complete set of frequent patterns will be written to an output file
+
+        :param outFile: name of the output file
+        :type outFile: csv file
+        """
+        self.oFile = outFile
+        # use a context manager so the output file is closed after writing
+        with open(self.oFile, 'w+') as writer:
+            for x, y in self._finalPatterns.items():
+                patternsAndSupport = x.strip() + ":" + str(y)
+                writer.write("%s \n" % patternsAndSupport)
+ + +
+[docs]
+    def printResults(self):
+        """
+        This function is used to print the results
+        """
+        print("Total number of Spatial Fuzzy Periodic-Frequent Patterns:", len(self.getPatterns()))
+        print("Total Memory in USS:", self.getMemoryUSS())
+        print("Total Memory in RSS", self.getMemoryRSS())
+        # getRuntime() returns a difference of time.time() values, i.e. seconds
+        print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_ab._sys.argv) == 6 or len(_ab._sys.argv) == 7:
+        if len(_ab._sys.argv) == 7:
+            _ap = FGPFPMiner(_ab._sys.argv[1], _ab._sys.argv[2], _ab._sys.argv[3], _ab._sys.argv[4],
+                             _ab._sys.argv[5], _ab._sys.argv[6])
+        if len(_ab._sys.argv) == 6:
+            _ap = FGPFPMiner(_ab._sys.argv[1], _ab._sys.argv[2], _ab._sys.argv[3], _ab._sys.argv[4],
+                             _ab._sys.argv[5])
+        _ap.mine()
+        print("Total number of Spatial Fuzzy Periodic Frequent Patterns:", len(_ap.getPatterns()))
+        _ap.save(_ab._sys.argv[2])
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS:", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        print("Error! The number of input parameters do not match the total number of parameters provided")
+        # falling back to a built-in sample run
+        _ap = FGPFPMiner('sample.txt', 'nei.txt', 1, 10, ' ')
+        _ap.mine()
+        print("Total number of Fuzzy Periodic-Frequent Patterns:", len(_ap.getPatterns()))
+        _ap.save('output.txt')
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS:", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/fuzzyGeoreferencedPeriodicFrequentPattern/basic/FGPFPMiner_old.html b/sphinx/_build/html/_modules/PAMI/fuzzyGeoreferencedPeriodicFrequentPattern/basic/FGPFPMiner_old.html new file mode 100644 index 000000000..368cd42fd --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/fuzzyGeoreferencedPeriodicFrequentPattern/basic/FGPFPMiner_old.html @@ -0,0 +1,1000 @@ + + + + + + PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner_old — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner_old

+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#             from PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic import FGPFPMiner as alg
+#
+#             obj = alg.FGPFPMiner("input.txt", "neighbours.txt", 3, 4)
+#
+#             obj.mine()
+#
+#             print("Total number of fuzzy frequent spatial patterns:", len(obj.getPatterns()))
+#
+#             obj.save("outputFile")
+#
+#             print("Total Memory in USS:", obj.getMemoryUSS())
+#
+#             print("Total Memory in RSS", obj.getMemoryRSS())
+#
+#             print("Total ExecutionTime in seconds:", obj.getRuntime())
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+import pandas as pd
+import plotly.express as px
+import PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.abstract as _ab
+from deprecated import deprecated
+
+
+class _FFList:
+    """
+    A class represent a Fuzzy List of an element
+
+    :Attributes:
+
+         item : int
+             the item name
+         sumIUtil : float
+             the sum of utilities of a fuzzy item in database
+         sumRUtil : float
+             the sum of resting values of a fuzzy item in database
+         elements : list
+             a list of elements containing the tid, utility, and resting value of the item in each transaction
+    :Methods:
+
+        addElement(element)
+            Method to add an element to this fuzzy list and update the sums at the same time.
+        printElement(e)
+            Method to print elements
+    """
+
+    def __init__(self, itemName):
+        self.item = itemName
+        self.isPeriodic = False
+        self.sumIUtil = 0.0
+        self.sumRUtil = 0.0
+        self.elements = []
+
+    def addElement(self, element):
+        """
+        A method that adds a new element to the FFList
+
+        :param element: an element to be added to FFList
+        :type element: Element
+        """
+        self.sumIUtil += element.iUtils
+        self.sumRUtil += element.rUtils
+        self.elements.append(element)
+
+    def printElement(self):
+        """
+        A method to print the elements in the FFList object
+        """
+        for ele in self.elements:
+            print(ele.tid, ele.iUtils, ele.rUtils)
+
+
+class _Element:
+    """
+    A class representing an element of a fuzzy list
+
+    :Attributes:
+
+        tid : int
+            keeps track of the transaction id
+        iUtils : float
+            the utility of a fuzzy item in the transaction
+        rUtils : float
+            the neighbourhood resting value of a fuzzy item in the transaction
+    """
+
+    def __init__(self, tid, iUtil, rUtil):
+        self.tid = tid
+        self.iUtils = iUtil
+        self.rUtils = rUtil
+
+
+class _Pair:
+    """
+    A class to store an item and its quantity together
+    """
+
+    def __init__(self):
+        self.item = 0
+        self.quantity = 0
+
+
+
+[docs] +class FGPFPMiner(_ab._fuzzySpatialFrequentPatterns): + """ + :Description: Fuzzy Frequent Spatial Pattern-Miner is desired to find all Spatially frequent fuzzy patterns + which is on-trivial and challenging problem to its huge search space.we are using efficient pruning + techniques to reduce the search space. + + :Reference: + + + :param iFile: str : + Name of the Input file to mine complete set of frequent patterns + :param oFile: str : + Name of the output file to store complete set of frequent patterns + :param minSup: int or float or str : + The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup is expressed in count. Otherwise, it will be treated as float. + :param maxPer: float : + The user can specify maxPer in count or proportion of database size. If the program detects the data type of maxPer is integer, then it treats maxPer is expressed in count. + :param nFile: str : + Name of the input file to mine complete set of frequent patterns + :param FuzFile: str : + The user can specify fuzFile. + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + + :Attributes: + + iFile : file + Name of the input file to mine complete set of fuzzy spatial frequent patterns + oFile : file + Name of the oFile file to store complete set of fuzzy spatial frequent patterns + minSup : float + The user given minimum support + neighbors : map + keep track of neighbours of elements + memoryRSS : float + To store the total amount of RSS memory consumed by the program + startTime : float + To record the start time of the mining process + endTime : float + To record the completion time of the mining process + itemsCnt : int + To record the number of fuzzy spatial itemSets generated + mapItemSum : map + To keep track of sum of Fuzzy Values of items + mapItemRegions : map + To Keep track of fuzzy regions of item + joinsCnt : int + To keep track of the number of FFI-list that was constructed + BufferSize : int + represent the size of Buffer + itemSetBuffer list + to keep track of items in buffer + + :Methods: + + mine() + Mining process will start from here + getPatterns() + Complete set of patterns will be retrieved with this function + save(oFile) + Complete set of frequent patterns will be loaded in to a output file + getPatternsAsDataFrame() + Complete set of frequent patterns will be loaded in to a dataframe + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function + convert(value) + To convert the given user specified value + FSFIMining( prefix, prefixLen, fsFim, minSup) + Method generate FFI from prefix + construct(px, py) + A function to construct Fuzzy itemSet from 2 fuzzy itemSets + Intersection(neighbourX,neighbourY) + Return common neighbours of 2 itemSet Neighbours + findElementWithTID(uList, tid) + To find element with same tid as given + WriteOut(prefix, prefixLen, item, sumIUtil,period) + To Store the patten + + **Executing the code on terminal :** + ---------------------------------------- + + .. 
code-block:: console + + Format: + + (.venv) $ python3 FGPFPMiner_old.py <inputFile> <outputFile> <neighbours> <minSup> <maxPer> <sep> + + Example Usage: + + (.venv) $ python3 FGPFPMiner_old.py sampleTDB.txt output.txt sampleN.txt 3 4 + + .. note:: minSup will be considered in percentage of database transactions + + + **Sample run of importing the code:** + ------------------------------------------ + + from PAMI.fuzzyGeoreferencedPeriodicFrequentPattern import FGPFPMiner as alg + + obj = alg.FFSPMiner("input.txt", "neighbours.txt", 3, 4) + + obj.mine() + + print("Total number of fuzzy frequent spatial patterns:", len(obj.getPatterns())) + + obj.save("outputFile") + + print("Total Memory in USS:", obj.getMemoryUSS()) + + print("Total Memory in RSS", obj.getMemoryRSS()) + + print("Total ExecutionTime in seconds:", obj.getRuntime()) + + **Credits:** + ---------------- + The complete program was written by B.Sai Chitra and Kundai Kwangwari under the supervision of Professor Rage Uday Kiran. + """ + + _startTime = float() + _endTime = float() + _minSup = str() + _maxPer = float() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _nFile = " " + _FuzFile = " " + _memoryUSS = float() + _memoryRSS = float() + _sep = "\t" + _transactionsDB = [] + _fuzzyValuesDB = [] + + def __init__(self, iFile, nFile, FuzFile, minSup, maxPer, sep): + super().__init__(iFile, nFile, FuzFile, minSup, maxPer, sep) + self._mapItemNeighbours = {} + self._startTime = 0 + self._endTime = 0 + self._itemsCnt = 0 + self._itemSupData = {} + self._mapItemSum = {} + self._finalClosedPeriodicPatterns = {} + self._mapItemRegions = {} + self._fuzzyRegionReferenceMap = {} + self._joinsCnt = 0 + self._BufferSize = 200 + self._itemSetBuffer = [] + self._finalPatterns = {} + self._finalPeriodicPatterns = {} + self._tidList = {} + self._dbLen = 0 + self._regionsNumber = 0 + self._RegionsCal = [] + self._RegionsLabel = [] + self._LabelKey = {} + + def _compareItems(self, o1, o2): + """ + A Function that sort all FFI-list in ascending order of Support + + :param o1: First FFI-list + :type o1: _FFList + :param o2: Second FFI-list + :type o2: _FFList + :return: Comparison Value + :rtype: int + """ + compare = self._mapItemSum[o1.item] - self._mapItemSum[o2.item] + if compare == 0: + return int(o1.item) - int(o2.item) + else: + return compare + + def _convert(self, value) -> float: + """ + To convert the given user specified value + + :param value: user specified value + :type value: int or float or str + :return: converted value + :rtype: float + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (self._dbLen * value) + if type(value) is str: + if '.' 
in value: + value = float(value) + else: + value = int(value) + return value + + def _fuzzyMembershipFunc(self): + try: + with open(self._FuzFile, 'r', encoding='utf-8') as f: + count = 0 + for line in f: + line = line.split("\n")[0] + parts = line.split(" ") + lowerBound = parts[0].strip() + upperBound = parts[1].strip() + lb_Label = parts[2].strip() + ub_Label = parts[3].strip() + self._RegionsCal.append([int(lowerBound), int(upperBound)]) + self._RegionsLabel.append([lb_Label, ub_Label]) + for i in range(0, 2): + if lb_Label.capitalize() not in self._LabelKey: + self._LabelKey[lb_Label.capitalize()] = count + count += 1 + if ub_Label.capitalize() not in self._LabelKey: + self._LabelKey[ub_Label.capitalize()] = count + count += 1 + except IOError: + print("File Not Found") + quit() + + def _creatingItemSets(self): + """ + Storing the complete transactions of the database/input file in a database variable + """ + self._transactionsDB, self._fuzzyValuesDB = [], [] + if isinstance(self._iFile, _ab._pd.DataFrame): + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'Transactions' in i: + self._transactionsDB = self._iFile['Transactions'].tolist() + if 'fuzzyValues' in i: + self._fuzzyValuesDB = self._iFile['fuzzyValues'].tolist() + + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line = line.decode("utf-8") + line = line.split("\n")[0] + parts = line.split(":") + items = parts[0].split(self._sep) + quantities = parts[2].split(self._sep) + self._transactionsDB.append([x for x in items]) + self._fuzzyValuesDB.append([x for x in quantities]) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line = line.split("\n")[0] + parts = line.split(":") + parts[0] = parts[0].strip() + parts[2] = parts[2].strip() + items = parts[0].split(self._sep) + quantities = parts[2].split(self._sep) + self._transactionsDB.append([x for x in items]) + self._fuzzyValuesDB.append([x for x in quantities]) + except IOError: + print("File Not Found") + quit() + + def _mapNeighbours(self): + """ + A function to map items to their Neighbours + """ + self._mapItemNeighbours = {} + if isinstance(self._nFile, _ab._pd.DataFrame): + data, items = [], [] + if self._nFile.empty: + print("its empty..") + i = self._nFile.columns.values.tolist() + if 'items' in i: + items = self._nFile['items'].tolist() + if 'Neighbours' in i: + data = self._nFile['Neighbours'].tolist() + for k in range(len(items)): + self._mapItemNeighbours[items[k]] = data[k] + + if isinstance(self._nFile, str): + if _ab._validators.url(self._nFile): + data = _ab._urlopen(self._nFile) + for line in data: + line = line.decode("utf-8") + line = line.split("\n")[0] + parts = [i.rstrip() for i in line.split(self._sep)] + parts = [x for x in parts] + item = parts[0] + neigh1 = [] + for i in range(1, len(parts)): + neigh1.append(parts[i]) + self._mapItemNeighbours[item] = neigh1 + else: + try: + with open(self._nFile, 'r', encoding='utf-8') as f: + for line in f: + line = line.split("\n")[0] + parts = [i.rstrip() for i in line.split(self._sep)] + parts = [x for x in parts] + item = parts[0] + neigh1 = [] + for i in range(1, len(parts)): + neigh1.append(parts[i]) + self._mapItemNeighbours[item] = neigh1 + except IOError: + print(self._nFile) + print("File Not Found") + quit() + + def _Regions(self, quantity): + """ + param quantity: + type quantity: + """ + + self._list = [0] * len(self._LabelKey) + if 
self._RegionsCal[0][0] < quantity <= self._RegionsCal[0][1]: + self._list[0] = 1 + return + elif quantity >= self._RegionsCal[-1][0]: + self._list[-1] = 1 + return + else: + for i in range(1, len(self._RegionsCal) - 1): + if self._RegionsCal[i][0] < quantity <= self._RegionsCal[i][1]: + base = self._RegionsCal[i][1] - self._RegionsCal[i][0] + for pos in range(0, 2): + if self._RegionsLabel[i][pos].islower(): + self._list[self._LabelKey[self._RegionsLabel[i][pos].capitalize()]] = float( + (self._RegionsCal[i][1] - quantity) / base) + else: + self._list[self._LabelKey[self._RegionsLabel[i][pos].capitalize()]] = float( + (quantity - self._RegionsCal[i][0]) / base) + return + +
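+    # A hedged worked sketch of the membership split computed in _Regions above
+    # (the bounds and quantity are hypothetical): for a region (lb, ub) = (2, 4)
+    # and quantity q = 3.5, the lower-bound label receives (ub - q) / base and the
+    # upper-bound label receives (q - lb) / base, and the two degrees sum to 1.
+    #
+    #     lb, ub, q = 2, 4, 3.5
+    #     base = ub - lb
+    #     low, high = (ub - q) / base, (q - lb) / base
+    #     assert (low, high) == (0.25, 0.75) and low + high == 1.0
+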
+[docs] + @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.") + def startMine(self): + """ + Frequent pattern mining process will start from here + """ + self.mine()
+ + +
+[docs] + def mine(self): + """ + Frequent pattern mining process will start from here + """ + self._startTime = _ab._time.time() + self._mapNeighbours() + self._creatingItemSets() + self._fuzzyMembershipFunc() + self._finalPatterns = {} + recent_occur = {} + for line in range(len(self._transactionsDB)): + item_list = self._transactionsDB[line] + fuzzyValues_list = self._fuzzyValuesDB[line] + self._dbLen += 1 + """ + This section below is for: + 1.Finding the support of each item's region in the entire database + 2.Finding the periodic patterns of the data + 3.Trimming off the patterns whose support is less than minSupport + """ + for i in range(0, len(item_list)): + item = item_list[i] + if item in self._tidList: + self._tidList[item].append(self._dbLen - recent_occur[item][-1]) + recent_occur[item].append(self._dbLen) + else: + self._tidList[item] = [self._dbLen] + recent_occur[item] = [self._dbLen] + fuzzy_ref = fuzzyValues_list[i] + if item in self._mapItemNeighbours: + if fuzzy_ref not in self._fuzzyRegionReferenceMap: + self._Regions(int(fuzzy_ref)) + self._fuzzyRegionReferenceMap[fuzzy_ref] = self._list + + if item in self._itemSupData.keys(): + self._itemSupData[item] = [sum(i) for i in zip(self._itemSupData[item], + self._fuzzyRegionReferenceMap[fuzzy_ref])] + else: + self._itemSupData[item] = self._fuzzyRegionReferenceMap[fuzzy_ref] + + for item in self._tidList.keys(): + self._tidList[item].append(len(self._transactionsDB) - recent_occur[item][-1]) + del recent_occur + """ + Using Maximum Scalar Cardinality Value strategy to narrow down search space and generate candidate fuzzy periodic-frequent items. + Step1. Identify the regional representative (region with max support). This is the representative that will be tested to see if its greater than given minSup + Step2. prune out all items whose regional support is less than the given minSup + Step3. 
At the end, sort the list of stored Candidate Frequent-Periodic Patterns in ascending order + """ + + listOfFFList = [] + mapItemsToFFLIST = {} + region_label = [] + for i in range(0, len(self._RegionsLabel)): + if self._RegionsLabel[i][1] not in region_label: + region_label.append(str(self._RegionsLabel[i][1])) + + self._minSup = self._convert(self._minSup) + for item in self._itemSupData.keys(): + if max(self._itemSupData[item]) >= self._minSup: + self._mapItemSum[item] = max(self._itemSupData[item]) + self._mapItemRegions[item] = region_label[self._itemSupData[item].index(self._mapItemSum[item])] + fuList = _FFList(item) + if int(self._maxPer) >= max(self._tidList[item]): + fuList.isPeriodic = True + mapItemsToFFLIST[item] = fuList + listOfFFList.append(fuList) + + del self._itemSupData + del self._tidList + listOfFFList.sort(key=_ab._functools.cmp_to_key(self._compareItems)) + tid = 0 + for j in range(len(self._transactionsDB)): + item_list = list(set(self._transactionsDB[j]).intersection(set(self._mapItemSum.keys()))) + revisedTransaction = [] + for i in range(0, len(item_list)): + pair = _Pair() + pair.item = item_list[i] + fuzzy_ref = str(self._fuzzyValuesDB[j][self._transactionsDB[j].index(pair.item)]) + pair.quantity = self._fuzzyRegionReferenceMap[fuzzy_ref][ + region_label.index(self._mapItemRegions[pair.item])] + if pair.quantity > 0: + revisedTransaction.append(pair) + revisedTransaction.sort(key=_ab._functools.cmp_to_key(self._compareItems)) + qaunt = {} + for i in range(len(revisedTransaction) - 1, -1, -1): + pair = revisedTransaction[i] + qaunt[pair.item] = pair.quantity + remainUtil = 0 + temp = list(set(self._mapItemNeighbours[pair.item]).intersection(set(qaunt.keys()))) + for j in temp: + remainUtil += float(qaunt[j]) + del temp + remainingUtility = remainUtil + FFListObject = mapItemsToFFLIST[pair.item] + element = _Element(tid, pair.quantity, remainingUtility) + FFListObject.addElement(element) + del qaunt + tid += 1 + itemNeighbours = list(self._mapItemNeighbours.keys()) + self._FSFIMining(self._itemSetBuffer, 0, listOfFFList, self._minSup, itemNeighbours) + self._endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = float() + self._memoryRSS = float() + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss
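+    # A minimal sketch of the Maximum Scalar Cardinality pruning described in the
+    # comments of mine() above (hypothetical support values): each item keeps one
+    # accumulated support per fuzzy region, and an item survives only if its best
+    # region meets minSup.
+    #
+    #     itemSupData = {'a': [1.2, 3.4, 0.8], 'b': [0.2, 0.5, 0.1]}
+    #     minSup = 2
+    #     kept = {i: max(s) for i, s in itemSupData.items() if max(s) >= minSup}
+    #     assert kept == {'a': 3.4}
+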
+ + + def _FSFIMining(self, prefix, prefixLen, FSFIM, minSup, itemNeighbours): + """ + Generates FFSPMiner from prefix + + :param prefix: the prefix patterns of FFSPMiner + :type prefix: len + :param prefixLen: the length of prefix + :type prefixLen: int + :param FSFIM: the Fuzzy list of prefix itemSets + :type FSFIM: list + :param minSup: the minimum support of + :type minSup: int + :param itemNeighbours: the set of common neighbours of prefix + :type itemNeighbours: list or set + """ + for i in range(0, len(FSFIM)): + _FFListObject1 = FSFIM[i] + if _FFListObject1.sumIUtil >= minSup: + self._WriteOut(prefix, prefixLen, _FFListObject1, _FFListObject1.sumIUtil) + newNeighbourList = self._Intersection(self._mapItemNeighbours.get(_FFListObject1.item), itemNeighbours) + if _FFListObject1.sumRUtil >= minSup: + exULs = [] + for j in range(i + 1, len(FSFIM)): + _FFListObject2 = FSFIM[j] + if _FFListObject2.item in newNeighbourList: + exULs.append(self._construct(_FFListObject1, _FFListObject2)) + self._joinsCnt += 1 + self._itemSetBuffer.insert(prefixLen, _FFListObject1.item) + self._FSFIMining(self._itemSetBuffer, prefixLen + 1, exULs, minSup, newNeighbourList) + + def _Intersection(self, neighbourX, neighbourY): + """ + A function to get common neighbours from 2 itemSets + + :param neighbourX: the set of neighbours of itemSet 1 + :type neighbourX: set or list + :param neighbourY: the set of neighbours of itemSet 2 + :type neighbourY: set or list + :return: set of common neighbours of 2 itemSets + :rtype: set + """ + result = [] + if neighbourX is None or neighbourY is None: + return result + for i in range(0, len(neighbourX)): + if neighbourX[i] in neighbourY: + result.append(neighbourX[i]) + return result + +
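+    # The neighbour test in _FSFIMining relies on the order-preserving list
+    # intersection implemented by _Intersection above; a tiny hypothetical example:
+    #
+    #     neighX, neighY = ['b', 'c', 'd'], ['c', 'd', 'e']
+    #     common = [x for x in neighX if x in neighY]
+    #     assert common == ['c', 'd']
+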
+[docs] + def getMemoryUSS(self): + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self): + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self): + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + return self._endTime - self._startTime
+
+
+    def _construct(self, _FFListObject1, _FFListObject2):
+        """
+        A function to construct a new fuzzy itemSet from 2 fuzzy itemSets
+
+        :param _FFListObject1: the itemSet px
+        :type _FFListObject1: _FFList
+        :param _FFListObject2: the itemSet py
+        :type _FFListObject2: _FFList
+        :return: the itemSet pxy (the join of px and py)
+        :rtype: _FFList
+        """
+        recent_occur, first_occur, tid = 0, 0, 0
+        periodlist = []
+        _newFFListObject = _FFList(_FFListObject2.item)
+        for Ob1Element in _FFListObject1.elements:
+            Ob2Element = self._findElementWithTID(_FFListObject2, Ob1Element.tid)
+            if Ob2Element is None:
+                continue
+            tid = Ob1Element.tid
+            if len(periodlist) == 0:
+                periodlist.append(abs(first_occur - tid))
+                recent_occur = tid
+            else:
+                periodlist.append(tid - recent_occur)
+                recent_occur = tid
+            newElement = _Element(Ob1Element.tid, min([Ob1Element.iUtils, Ob2Element.iUtils], key=lambda x: float(x)),
+                                  Ob2Element.rUtils)
+            _newFFListObject.addElement(newElement)
+
+        if periodlist and int(self._maxPer) >= max(periodlist):
+            _newFFListObject.isPeriodic = True
+        else:
+            _newFFListObject.isPeriodic = False
+        return _newFFListObject
+
+    def _findElementWithTID(self, uList, tid):
+        """
+        To find the element with the same tid as given
+
+        :param uList: fuzzy list
+        :type uList: _FFList
+        :param tid: transaction id
+        :type tid: int
+        :return: the element with the given tid if it exists, otherwise None
+        :rtype: _Element or None
+        """
+        List = uList.elements
+        first = 0
+        last = len(List) - 1
+        while first <= last:
+            mid = (first + last) >> 1
+            if List[mid].tid < tid:
+                first = mid + 1
+            elif List[mid].tid > tid:
+                last = mid - 1
+            else:
+                return List[mid]
+        return None
+
+    def _WriteOut(self, prefix, prefixLen, _FFListObject, sumIUtil):
+        """
+        To store the pattern
+
+        :param prefix: prefix of itemSet
+        :type prefix: list
+        :param prefixLen: length of prefix
+        :type prefixLen: int
+        :param _FFListObject: the FFI-list of the last item
+        :type _FFListObject: _FFList
+        :param sumIUtil: sum of utility of itemSet
+        :type sumIUtil: float
+        """
+        item = _FFListObject.item
+        self._itemsCnt += 1
+        res = ""
+        for i in range(0, prefixLen):
+            res += str(prefix[i]) + "." + str(self._mapItemRegions[prefix[i]]) + "\t"
+        res += str(item) + "." + str(self._mapItemRegions.get(item))
+        res1 = str(sumIUtil)
+        self._finalPatterns[res] = res1
+
+        if _FFListObject.isPeriodic:
+            self._finalPeriodicPatterns[res] = res1
+
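+    # A hedged sketch of the pattern keys produced by _WriteOut above (items and
+    # regions are hypothetical): each item is suffixed with its fuzzy region and
+    # items are joined by tabs before being stored in self._finalPatterns.
+    #
+    #     prefix, item, regions = ['a'], 'b', {'a': 'High', 'b': 'High'}
+    #     res = "\t".join(x + "." + regions[x] for x in prefix + [item])
+    #     assert res == "a.High\tb.High"
+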
+[docs] + def getPatternsAsDataFrame(self): + """ + Storing final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataFrame = {} + data = [] + for a, b in self._finalPeriodicPatterns.items(): + data.append([a.replace('\t', ' '), b]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support']) + return dataFrame
+ + +
+[docs] + def getPatterns(self): + """ + Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + :rtype: dict + """ + return self._finalPeriodicPatterns
+ + +
+[docs]
+    def save(self, outFile):
+        """
+        Complete set of frequent patterns will be written to an output file
+
+        :param outFile: name of the output file
+        :type outFile: str
+        """
+        self.oFile = outFile
+        with open(self.oFile, 'w+') as writer:
+            for x in self._finalPatterns.keys():
+                patternsAndSupport = x.strip() + ":" + str(self._finalPatterns[x])
+                writer.write("%s \n" % patternsAndSupport)
+ + +
+[docs]
+    def printResults(self):
+        """
+        This function is used to print the results
+        """
+        print("Total number of Spatial Fuzzy Periodic-Frequent Patterns:", len(self.getPatterns()))
+        print("Total Memory in USS:", self.getMemoryUSS())
+        print("Total Memory in RSS:", self.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", self.getRuntime())
+ + +
+[docs] + def getPatternsAsDataframe(self): + + """ + :return: returning periodic frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + data = [] + dataFrame = _ab._pd.DataFrame() + for a, b in self._finalPeriodicPatterns.items(): + data.append([a, b]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support']) + return dataFrame
+ + +
+[docs] + def generateLatexCode(self, result): + + titles = result.columns.tolist() + titles.remove("minsup") + titles.remove("algorithm") + for i in range(0, len(titles)): + legendary = pd.unique(result[['algorithm']].values.ravel()) + color = ['red', 'blue', 'green', 'black', 'yellow'] + xaxis = result["minsup"].values.tolist() + yaxis = result[titles[i]].values.tolist() + algo = result["algorithm"].values.tolist() + x_label = "minsup" + filename = titles[i] + latexwriter = open(filename + "Latexfile.tex", "w") + latexwriter.write("") + latexwriter.write("\\begin{axis}[\n\txlabel={\\Huge{" + x_label + "}},") + latexwriter.write("\n\tylabel={\\Huge{" + titles[i] + "}},") + latexwriter.write("\n\txmin=" + str(min(xaxis)) + ", xmax=" + str(max(xaxis)) + ",") + + for num in range(0, len(legendary)): + latexwriter.write("\n\\addplot+ [" + color[num] + "]\n\tcoordinates {\n") + for num2 in range(0, len(xaxis)): + if (legendary[num] == algo[num2]): + latexwriter.write("(" + str(xaxis[num2]) + "," + str(yaxis[num2]) + ")\n") + latexwriter.write("\t}; \\addlegendentry{" + legendary[num] + "}\n") + if (num + 1 == len(legendary)): + latexwriter.write("\\end{axis}") + print("Latex file generated successfully")
+ + +
+[docs]
+    def generateGraphs(self, result):
+        """
+        Plot patterns, runtime and memory usage against minsup for each algorithm.
+
+        :param result: dataframe with 'algorithm', 'minsup', 'patterns', 'runtime', 'memoryUSS' and 'memoryRSS' columns
+        :type result: pd.DataFrame
+        """
+        fig = px.line(result, x='minsup', y='patterns', color='algorithm', title='Patterns', markers=True)
+        fig.show()
+        fig = px.line(result, x='minsup', y='runtime', color='algorithm', title='Runtime', markers=True)
+        fig.show()
+        fig = px.line(result, x='minsup', y='memoryUSS', color='algorithm', title='MemoryUSS', markers=True)
+        fig.show()
+        fig = px.line(result, x='minsup', y='memoryRSS', color='algorithm', title='MemoryRSS', markers=True)
+        fig.show()
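+    # A hedged usage sketch for generateGraphs (the layout of the 'result' frame
+    # is an assumption inferred from the column names referenced above):
+    #
+    #     import pandas as pd
+    #     result = pd.DataFrame({'algorithm': ['FGPFPMiner', 'FGPFPMiner'],
+    #                            'minsup': [2, 3], 'patterns': [10, 6],
+    #                            'runtime': [1.2, 0.9],
+    #                            'memoryUSS': [1000000, 900000],
+    #                            'memoryRSS': [2000000, 1800000]})
+    #     obj.generateGraphs(result)   # obj is a hypothetical FGPFPMiner instance
+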
+
+
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_ab._sys.argv) == 6 or len(_ab._sys.argv) == 7:
+        if len(_ab._sys.argv) == 7:
+            _ap = FGPFPMiner(_ab._sys.argv[1], _ab._sys.argv[2], _ab._sys.argv[3], _ab._sys.argv[4],
+                             _ab._sys.argv[5], _ab._sys.argv[6])
+        if len(_ab._sys.argv) == 6:
+            _ap = FGPFPMiner(_ab._sys.argv[1], _ab._sys.argv[2], _ab._sys.argv[3], _ab._sys.argv[4],
+                             _ab._sys.argv[5])
+        _ap.mine()
+        print("Total number of Spatial Fuzzy Periodic Frequent Patterns:", len(_ap.getPatterns()))
+        _ap.save(_ab._sys.argv[2])
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS:", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        print("Error! The number of input parameters do not match the total number of parameters provided")
+
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/fuzzyPartialPeriodicPatterns/basic/F3PMiner.html b/sphinx/_build/html/_modules/PAMI/fuzzyPartialPeriodicPatterns/basic/F3PMiner.html new file mode 100644 index 000000000..676893ef7 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/fuzzyPartialPeriodicPatterns/basic/F3PMiner.html @@ -0,0 +1,776 @@ + + + + + + PAMI.fuzzyPartialPeriodicPatterns.basic.F3PMiner — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for PAMI.fuzzyPartialPeriodicPatterns.basic.F3PMiner

+# F3PMiner algorithm discovers the fuzzy partial periodic patterns in quantitative irregular multiple time series databases.
+#
+#
+# **Importing this algorithm into a python program**
+# ----------------------------------------------------
+#
+#             import PAMI.fuzzyPartialPeriodicPattern.basic.F3PMiner as alg
+#
+#             obj = alg.F3PMiner(iFile, minSup, sep)
+#
+#             obj.mine()
+#
+#             fuzzyPartialPeriodicPatterns = obj.getPatterns()
+#
+#             print("Total number of Fuzzy Partial Periodic Patterns:", len(fuzzyPartialPeriodicPatterns))
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternInDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+__copyright__ = """
+
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+from PAMI.fuzzyPartialPeriodicPatterns.basic import abstract as _ab
+from deprecated import deprecated
+
+
+class _FFList:
+    """
+     A class represent a Fuzzy List of an element
+
+    :Attributes:
+
+         item : int
+             the item name
+         sumIUtil : float
+             the sum of utilities of a fuzzy item in the database
+         elements : list
+             a list of elements containing the tid and utility of the item in each transaction
+
+    :Methods:
+
+        addElement(element)
+            Method to add an element to this fuzzy list and update the sums at the same time.
+        printElement(e)
+            Method to print elements
+
+    """
+
+    def __init__(self, itemName):
+        self.item = itemName
+        self.sumIUtil = 0.0
+        self.elements = []
+
+    def addElement(self, element):
+        """
+        A method that adds a new element to the FFList
+
+        :param element: an element to be added to FFList
+        :type element: Element
+        """
+        self.sumIUtil += element.iUtils
+        self.elements.append(element)
+
+    def printElement(self):
+        """
+        A method to print elements
+        """
+        for ele in self.elements:
+            print(ele.tid, ele.iUtils)
+
+
+class _Element:
+    """
+    A class representing an element of a fuzzy list
+
+    :Attributes:
+
+        tid : int
+            keeps track of the transaction id
+        iUtils : float
+            the utility of a fuzzy item in the transaction
+    """
+
+    def __init__(self, tid, iUtil):
+        self.tid = tid
+        self.iUtils = iUtil
+
+
+class _Pair:
+    """
+    A class to store item and it's quantity together
+    """
+
+    def __init__(self):
+        self.item = 0
+        self.quantity = 0
+
+
+
+[docs] +class F3PMiner(_ab._fuzzyPartialPeriodicPatterns): + """ + :Description: F3PMiner algorithm discovers the fuzzy partial periodic patterns in quantitative Irregulat multiple timeseries databases. + + :Reference: + + + :param iFile: str : + Name of the Input file to mine complete set of frequent patterns + :param oFile: str : + Name of the output file to store complete set of frequent patterns + :param minSup: int or float or str : + The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup is expressed in count. Otherwise, it will be treated as float. + :param maxPer: float : + The user can specify maxPer in count or proportion of database size. If the program detects the data type of maxPer is integer, then it treats maxPer is expressed in count. + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + + + :Attributes: + + iFile : string + Name of the input file to mine complete set of fuzzy spatial frequent patterns + oFile : string + Name of the oFile file to store complete set of fuzzy spatial frequent patterns + minSup : float + The user given minimum support + memoryRSS : float + To store the total amount of RSS memory consumed by the program + startTime : float + To record the start time of the mining process + endTime : float + To record the completion time of the mining process + itemsCnt : int + To record the number of fuzzy spatial itemSets generated + mapItemsGSum : map + To keep track of G region values of items + mapItemsMidSum: map + To keep track of M region values of items + mapItemsHSum: map + To keep track of H region values of items + mapItemSum: map + To keep track of sum of Fuzzy Values of items + mapItemRegions: map + To Keep track of fuzzy regions of item + joinsCnt: int + To keep track of the number of ffi-list that was constructed + BufferSize: int + represent the size of Buffer + itemSetBuffer list + to keep track of items in buffer + + :Methods: + + mine() + Mining process will start from here + getPatterns() + Complete set of patterns will be retrieved with this function + save(oFile) + Complete set of frequent patterns will be loaded in to a output file + getPatternsAsDataFrame() + Complete set of frequent patterns will be loaded in to a dataframe + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function + convert(value) + To convert the given user specified value + compareItems(o1, o2) + A Function that sort all ffi-list in ascending order of Support + F3PMining(prefix, prefixLen, FSFIM, minSup) + Method generate ffi from prefix + construct(px, py) + A function to construct Fuzzy itemSet from 2 fuzzy itemSets + findElementWithTID(uList, tid) + To find element with same tid as given + WriteOut(prefix, prefixLen, item, sumIUtil) + To Store the patten + + **Executing the code on terminal :** + --------------------------------------- + + .. code-block:: console + + Format: + + (.venv) $ python3 F3PMiner.py <inputFile> <outputFile> <minSup> <separator> + + Example Usage: + + (.venv) $ python3 F3PMiner.py sampleTDB.txt output.txt 6 + + .. 
note:: minSup will be considered in percentage of database transactions + + + **Sample run of importing the code:** + -------------------------------------- + + from PAMI.fuzzyPartialPeriodicPatterns import F3PMiner as alg + + obj = alg.F3PMiner("input.txt", 2) + + obj.mine() + + fuzzyPartialPeriodicPatterns = obj.getPatterns() + + print("Total number of Fuzzy Frequent Patterns:", len(fuzzyPartialPeriodicPatterns)) + + obj.save("outputFile") + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + **Credits:** + ------------- + The complete program was written by PALLA Likhitha under the supervision of Professor Rage Uday Kiran. + """ + + _startTime = float() + _endTime = float() + _minSup = str() + _maxPer = float() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _memoryUSS = float() + _memoryRSS = float() + _sep = "\t" + + def __init__(self, iFile, minSup, sep="\t"): + super().__init__(iFile, minSup, sep) + self._startTime = 0 + self._endTime = 0 + self._itemsCnt = 0 + self._mapItemSum = {} + self._mapItemRegions = {} + self._joinsCnt = 0 + self._BufferSize = 200 + self._itemSetBuffer = [] + self._transactions = [] + self._fuzzyValues = [] + self._ts = [] + self._finalPatterns = {} + self._dbLen = 0 + + def _compareItems(self, o1, o2): + """ + A Function that sort all ffi-list in ascending order of Support + + :param o1: First FFI-list + :type o1: _FFList + :param o2: Second FFI-list + :type o2: _FFList + :return: Comparison Value + :rtype: int + """ + compare = self._mapItemSum[o1.item] - self._mapItemSum[o2.item] + if compare == 0: + if o1.item < o2.item: + return -1 + elif o1.item > o2.item: + return 1 + else: + return 0 + else: + return compare + + def _convert(self, value): + """ + To convert the given user specified value + + :param value: user specified value + :type value: int or float or str + :return: converted value + :rtype: float + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (self._dbLen * value) + if type(value) is str: + if '.' 
in value: + value = float(value) + value = (self._dbLen * value) + else: + value = int(value) + return value + + def _creatingItemsets(self): + """ + Storing the complete transactions of the database/input file in a database variable + """ + self._transactions, self._fuzzyValues, self._Database, self._ts = [], [], [], [] + if isinstance(self._iFile, _ab._pd.DataFrame): + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'Transactions' in i: + self._transactions = self._iFile['Transactions'].tolist() + if 'fuzzyValues' in i: + self._fuzzyValues = self._iFile['fuzzyValues'].tolist() + # print(self.Database) + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line = line.decode("utf-8") + line = line.split("\n")[0] + parts = line.split(":") + parts[0] = parts[0].strip() + parts[2] = parts[2].strip() + items = parts[0].split(self._sep) + quantities = parts[2].split(self._sep) + self._transactions.append([x for x in items]) + self._fuzzyValues.append([x for x in quantities]) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line = line.strip() + parts = line.split(":") + parts[0] = parts[0].strip() + parts[1] = parts[1].strip() + parts[2] = parts[2].strip() + times = parts[0].split(self._sep) + items = parts[1].split(self._sep) + quantities = parts[2].split(self._sep) + #print(times, items, quantities) + _time = [x for x in times if x] + items = [x for x in items if x] + quantities = [float(x) for x in quantities if x] + tempList = [] + for k in range(len(_time)): + ite = "(" + _time[k] + "," + items[k] + ")" + tempList.append(ite) + self._ts.append([x for x in times]) + self._transactions.append([x for x in tempList]) + self._fuzzyValues.append([x for x in quantities]) + except IOError: + print("File Not Found") + quit() + +
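+    # A hedged sketch of the input-line format parsed by _creatingItemsets above
+    # (a hypothetical line; the three ':'-separated sections hold timestamps,
+    # items and fuzzy values, each split on the separator):
+    #
+    #     line = "1\t2\t3:a\tb\tc:0.4\t0.7\t0.2"
+    #     ts, items, vals = [s.strip().split("\t") for s in line.split(":")]
+    #     pairs = ["(" + t + "," + i + ")" for t, i in zip(ts, items)]
+    #     assert pairs == ['(1,a)', '(2,b)', '(3,c)']
+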
+[docs] + @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.") + def startMine(self): + """ + fuzzy-Frequent pattern mining process will start from here + """ + self.mine()
+ + +
+[docs] + def mine(self): + """ + fuzzy-Frequent pattern mining process will start from here + """ + self._startTime = _ab._time.time() + self._creatingItemsets() + for line in range(len(self._transactions)): + times = self._ts[line] + items = self._transactions[line] + quantities = self._fuzzyValues[line] + self._dbLen += 1 + for i in range(0, len(items)): + item = items[i] + if item in self._mapItemSum: + self._mapItemSum[item] += quantities[i] + else: + self._mapItemSum[item] = quantities[i] + listOfffilist = [] + mapItemsToFFLIST = {} + #self._minSup = float(self._minSup) + self._minSup = self._convert(self._minSup) + minSup = self._minSup + for item1 in self._mapItemSum.keys(): + item = item1 + # print(type(self._mapItemSum[item])) + if self._mapItemSum[item] >= self._minSup: + fuList = _FFList(item) + mapItemsToFFLIST[item] = fuList + listOfffilist.append(fuList) + listOfffilist.sort(key=_ab._functools.cmp_to_key(self._compareItems)) + tid = 0 + for line in range(len(self._transactions)): + items = self._transactions[line] + quantities = self._fuzzyValues[line] + revisedTransaction = [] + for i in range(0, len(items)): + pair = _Pair() + pair.item = items[i] + pair.quantity = quantities[i] + item = pair.item + if self._mapItemSum[item] >= self._minSup: + if pair.quantity > 0: + revisedTransaction.append(pair) + revisedTransaction.sort(key=_ab._functools.cmp_to_key(self._compareItems)) + for i in range(len(revisedTransaction) - 1, -1, -1): + pair = revisedTransaction[i] + if mapItemsToFFLIST.get(pair.item) is not None: + FFListOfItem = mapItemsToFFLIST[pair.item] + element = _Element(tid, pair.quantity) + FFListOfItem.addElement(element) + tid += 1 + self._F3PMining(self._itemSetBuffer, 0, listOfffilist, self._minSup) + self._endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = float() + self._memoryRSS = float() + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss
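+    # A minimal sketch of the support accumulation performed at the start of
+    # mine() above (hypothetical transactions): the fuzzy values of each item are
+    # summed over all transactions, and items whose sum falls below minSup are
+    # pruned before the FFI-lists are built.
+    #
+    #     transactions = [['a', 'b'], ['a']]
+    #     fuzzyValues = [[0.5, 0.25], [0.75]]
+    #     sums = {}
+    #     for items, vals in zip(transactions, fuzzyValues):
+    #         for i, v in zip(items, vals):
+    #             sums[i] = sums.get(i, 0) + v
+    #     assert sums == {'a': 1.25, 'b': 0.25}
+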
+ + + def _F3PMining(self, prefix, prefixLen, FSFIM, minSup): + """ + Generates ffi from prefix + + :param prefix: the prefix patterns of ffi + :type prefix: len + :param prefixLen: the length of prefix + :type prefixLen: int + :param FSFIM: the Fuzzy list of prefix itemSets + :type FSFIM: list + :param minSup: the minimum support of + :type minSup:int + """ + for i in range(0, len(FSFIM)): + X = FSFIM[i] + exULs = [] + if X.sumIUtil >= minSup: + self._WriteOut(prefix, prefixLen, X.item, X.sumIUtil) + for j in range(i + 1, len(FSFIM)): + Y = FSFIM[j] + exULs.append(self._construct(X, Y)) + self._joinsCnt += 1 + self._itemSetBuffer.insert(prefixLen, X.item) + self._F3PMining(self._itemSetBuffer, prefixLen + 1, exULs, minSup) + +
+[docs] + def getMemoryUSS(self): + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self): + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self): + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + return self._endTime - self._startTime
+ + + def _construct(self, px, py): + """ + A function to construct a new Fuzzy itemSet from 2 fuzzy itemSets + + :param px:the itemSet px + :type px:ffi-List + :param py:itemSet py + :type py:ffi-List + :return :the itemSet of pxy(px and py) + :rtype :ffi-List + """ + pxyUL = _FFList(py.item) + for ex in px.elements: + ey = self._findElementWithTID(py, ex.tid) + if ey is None: + continue + eXY = _Element(ex.tid, min([ex.iUtils, ey.iUtils], key=lambda x: float(x))) + pxyUL.addElement(eXY) + return pxyUL + + def _findElementWithTID(self, uList, tid): + """ + To find element with same tid as given + + :param uList: fuzzyList + :type uList: ffi-List + :param tid: transaction id + :type tid: int + :return: element tid as given + :rtype: element if exit or None + """ + List = uList.elements + first = 0 + last = len(List) - 1 + while first <= last: + mid = (first + last) >> 1 + if List[mid].tid < tid: + first = mid + 1 + elif List[mid].tid > tid: + last = mid - 1 + else: + return List[mid] + return None + + def _WriteOut(self, prefix, prefixLen, item, sumIUtil): + """ + To Store the patten + + :param prefix: prefix of itemSet + :type prefix: list + :param prefixLen: length of prefix + :type prefixLen: int + :param item: the last item + :type item: int + :param sumIUtil: sum of utility of itemSet + :type sumIUtil: float + + """ + self._itemsCnt += 1 + res = "" + for i in range(0, prefixLen): + res += str(prefix[i]) + "\t" + res += str(item) + res1 = str(sumIUtil) + self._finalPatterns[res] = res1 + +
+[docs] + def getPatternsAsDataFrame(self): + """ + Storing final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataFrame = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a.replace('\t', ' '), b]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support']) + return dataFrame
+ + +
+[docs] + def getPatterns(self): + """ + Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs]
+    def save(self, outFile):
+        """
+        Complete set of frequent patterns will be written to an output file
+
+        :param outFile: name of the output file
+        :type outFile: str
+        """
+        self._oFile = outFile
+        with open(self._oFile, 'w+') as writer:
+            for x, y in self._finalPatterns.items():
+                patternsAndSupport = x.strip() + ":" + str(y)
+                writer.write("%s \n" % patternsAndSupport)
+ + +
+[docs] + def printResults(self): + """ + This function is used to print the results + """ + print("Total number of Fuzzy Partial Periodic Frequent Patterns:", len(self.getPatterns())) + print("Total Memory in USS:", self.getMemoryUSS()) + print("Total Memory in RSS", self.getMemoryRSS()) + print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_ab._sys.argv) == 4 or len(_ab._sys.argv) == 5:
+        if len(_ab._sys.argv) == 5:
+            _ap = F3PMiner(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4])
+        if len(_ab._sys.argv) == 4:
+            _ap = F3PMiner(_ab._sys.argv[1], _ab._sys.argv[3])
+        _ap.mine()
+        _ap.save(_ab._sys.argv[2])
+        _ap.printResults()
+    else:
+        print("Error! The number of input parameters do not match the total number of parameters provided")
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/fuzzyPeriodicFrequentPattern/basic/FPFPMiner.html b/sphinx/_build/html/_modules/PAMI/fuzzyPeriodicFrequentPattern/basic/FPFPMiner.html new file mode 100644 index 000000000..b04ddf651 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/fuzzyPeriodicFrequentPattern/basic/FPFPMiner.html @@ -0,0 +1,837 @@ + + + + + + PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner

+# Fuzzy Periodic Frequent Pattern Miner is designed to find all fuzzy periodic frequent patterns, which is
+# a non-trivial and challenging problem due to its huge search space. Efficient pruning
+# techniques are used to reduce the search space.
+#
+# Sample run of importing the code:
+# ----------------------------------------
+#
+#             from PAMI.fuzzyPeriodicFrequentPattern.basic import FPFPMiner as alg
+#
+#             obj =alg.FPFPMiner("input.txt",2,3)
+#
+#             obj.mine()
+#
+#             periodicFrequentPatterns = obj.getPatterns()
+#
+#             print("Total number of Fuzzy Periodic Frequent Patterns:", len(periodicFrequentPatterns))
+#
+#             obj.save("output.txt")
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+
+from PAMI.fuzzyPeriodicFrequentPattern.basic import abstract as _ab
+from typing import List, Dict, Tuple, Set, Union, Any, Generator
+from deprecated import deprecated
+
+
+class _FFList:
+    """
+    A class representing a fuzzy list of an element
+
+    :Attributes:
+
+        item : int
+            the item name
+        sumLUtil : float
+            the sum of utilities of a fuzzy item in database
+        sumRUtil : float
+            the sum of resting values of a fuzzy item in database
+        elements : list
+            list of elements contain tid,Utility and resting values of element in each transaction
+        maxPeriod : int
+            it represents the maximum period of an item
+
+    :Methods:
+
+        addElement(element)
+            Method to add an element to this fuzzy list and update the sums at the same time.
+        printElement(e)
+            Method to print elements
+
+    """
+
+    def __init__(self, itemName: str) -> None:
+        self.item = itemName
+        self.sumLUtil = 0.0
+        self.sumRUtil = 0.0
+        self.elements = []
+        self.maxPeriod = 0
+
+    def addElement(self, element) -> None:
+        """
+        A method that adds a new element to the FFList
+
+        :param element: an element to be added to FFList
+        :type element: Element
+        :return: None
+        """
+        self.sumLUtil += element.lUtils
+        self.sumRUtil += element.rUtils
+        self.elements.append(element)
+        self.maxPeriod = max(self.maxPeriod, element.period)
+
+    def printElement(self) -> None:
+        """
+        A Method to Print elements in the FFList
+        :return: None
+        """
+        for ele in self.elements:
+            print(ele.tid, ele.lUtils, ele.rUtils, ele.period)
+
+
+class _Element:
+    """
+        A class representing an element of a fuzzy list
+
+        :Attributes:
+
+        tid : int
+            keeps track of the transaction id
+        lUtils: float
+            the utility of a fuzzy item in the transaction
+        rUtils : float
+            the resting value of a fuzzy item in the transaction
+        period: int
+            represent the period of the element
+    """
+
+    def __init__(self, tid: int, iUtil: float, rUtil: float, period: int) -> None:
+        self.tid = tid
+        self.lUtils = iUtil
+        self.rUtils = rUtil
+        self.period = period
+
+
+class _Pair:
+    """
+    A class to store item name and quantity together.
+    """
+
+    def __init__(self) -> None:
+        self.item = 0
+        self.quantity = 0
+
+
+
+[docs] +class FPFPMiner(_ab._fuzzyPeriodicFrequentPatterns): + """ + :Description: Fuzzy Periodic Frequent Pattern Miner is desired to find all fuzzy periodic frequent patterns which is + on-trivial and challenging problem to its huge search space.we are using efficient pruning + techniques to reduce the search space. + + :Reference: R. U. Kiran et al., "Discovering Fuzzy Periodic-Frequent Patterns in Quantitative Temporal Databases," + 2020 IEEE International Conference on Fuzzy Systems (FUZZ-IEEE), Glasgow, UK, 2020, pp. + 1-8, doi: 10.1109/FUZZ48607.2020.9177579. + + :param iFile: str : + Name of the Input file to mine complete set of frequent patterns + :param oFile: str : + Name of the output file to store complete set of frequent patterns + :param minSup: int or float or str : + The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup is expressed in count. Otherwise, it will be treated as float. + :param maxPer: float : + The user can specify maxPer in count or proportion of database size. If the program detects the data type of maxPer is integer, then it treats maxPer is expressed in count. + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + + :Attributes: + + iFile : file + Name of the input file to mine complete set of fuzzy spatial frequent patterns + oFile : file + Name of the oFile file to store complete set of fuzzy spatial frequent patterns + minSup : float + The user given support + period: int + periodicity of an element + memoryRSS : float + To store the total amount of RSS memory consumed by the program + startTime:float + To record the start time of the mining process + endTime:float + To record the completion time of the mining process + itemsCnt: int + To record the number of fuzzy spatial itemSets generated + mapItemsLowSum: map + To keep track of low region values of items + mapItemsMidSum: map + To keep track of middle region values of items + mapItemsHighSum: map + To keep track of high region values of items + mapItemSum: map + To keep track of sum of Fuzzy Values of items + mapItemRegions: map + To Keep track of fuzzy regions of item + jointCnt: int + To keep track of the number of FFI-list that was constructed + BufferSize: int + represent the size of Buffer + itemBuffer list + to keep track of items in buffer + maxTID: int + represent the maximum tid of the database + lastTIDs: map + represent the last tid of fuzzy items + itemsToRegion: map + represent items with respective regions + + :Methods: + + mine() + Mining process will start from here + getPatterns() + Complete set of patterns will be retrieved with this function + save(oFile) + Complete set of frequent patterns will be loaded in to a output file + getPatternsAsDataFrame() + Complete set of frequent patterns will be loaded in to a dataframe + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function + convert(value): + To convert the given user specified value + FSFIMining( prefix, prefixLen, fsFim, minSup) + Method generate FFI from prefix + construct(px, py) + A function to construct 
+        findElementWithTID(UList, tid)
+            To find the element with the same tid as given
+        WriteOut(prefix, prefixLen, item, sumIUtil, period)
+            To store the pattern
+
+    **Executing the code on terminal :**
+    ----------------------------------------
+
+    .. code-block:: console
+
+      Format:
+
+      (.venv) $ python3 FPFPMiner.py <inputFile> <outputFile> <minSup> <maxPer> <sep>
+
+      Example Usage:
+
+      (.venv) $ python3 FPFPMiner.py sampleTDB.txt output.txt 2 3
+
+    .. note:: minSup will be considered in percentage of database transactions
+
+
+    **Sample run of importing the code:**
+    --------------------------------------
+
+        from PAMI.fuzzyPeriodicFrequentPattern.basic import FPFPMiner as alg
+
+        obj = alg.FPFPMiner("input.txt", 2, 3)
+
+        obj.mine()
+
+        periodicFrequentPatterns = obj.getPatterns()
+
+        print("Total number of Fuzzy Periodic Frequent Patterns:", len(periodicFrequentPatterns))
+
+        obj.save("output.txt")
+
+        memUSS = obj.getMemoryUSS()
+
+        print("Total Memory in USS:", memUSS)
+
+        memRSS = obj.getMemoryRSS()
+
+        print("Total Memory in RSS", memRSS)
+
+        run = obj.getRuntime()
+
+        print("Total ExecutionTime in seconds:", run)
+
+    **Credits:**
+    --------------
+    The complete program was written by Sai Chitra.B under the supervision of Professor Rage Uday Kiran.
+
+    """
+    _startTime = float()
+    _endTime = float()
+    _minSup = float()
+    _maxPer = float()
+    _finalPatterns = {}
+    _iFile = " "
+    _oFile = " "
+    _memoryUSS = float()
+    _memoryRSS = float()
+    _sep = " "
+    _Database = []
+    _transactions = []
+    _fuzzyValues = []
+    _ts = []
+
+    def __init__(self, iFile: Union[str, _ab._pd.DataFrame], minSup: Union[int, float], period: Union[int, float], sep: str="\t") -> None:
+        super().__init__(iFile, minSup, period, sep)
+        self._oFile = ""
+        self._BufferSize = 200
+        self._itemSetBuffer = []
+        self._mapItemSum = {}
+        self._finalPatterns = {}
+        self._joinsCnt = 0
+        self._itemsCnt = 0
+        self._startTime = float()
+        self._endTime = float()
+        self._memoryUSS = float()
+        self._memoryRSS = float()
+        self._dbLen = 0
+
+    def _compareItems(self, o1, o2) -> int:
+        """
+        A function that sorts all FFI-lists in ascending order of support
+
+        :param o1: First FFI-list
+        :type o1: _FFList
+        :param o2: Second FFI-list
+        :type o2: _FFList
+        :return: Comparison value
+        :rtype: int
+        """
+        compare = self._mapItemSum[o1.item] - self._mapItemSum[o2.item]
+        if compare == 0:
+            return int(o1.item) - int(o2.item)
+        else:
+            return compare
+
+    def _convert(self, value) -> float:
+        """
+        To convert the given user specified value
+
+        :param value: user specified value
+        :type value: int or float or str
+        :return: converted value
+        :rtype: float
+        """
+        if type(value) is int:
+            value = int(value)
+        if type(value) is float:
+            value = (self._dbLen * value)
+        if type(value) is str:
+            if '.' in value:
+                value = (self._dbLen * float(value))
+            else:
+                value = int(value)
+        return value
+
+    def _creatingItemSets(self) -> None:
+        """
+        Storing the complete transactions of the database/input file in a database variable
+
+        :return: None
+        """
+        data, self._transactions, self._fuzzyValues, ts = [], [], [], []
+        if isinstance(self._iFile, _ab._pd.DataFrame):
+            if self._iFile.empty:
+                print("its empty..")
+            i = self._iFile.columns.values.tolist()
+            if 'TS' in i:
+                self._ts = self._iFile['TS'].tolist()
+            if 'Transactions' in i:
+                self._transactions = self._iFile['Transactions'].tolist()
+            if 'fuzzyValues' in i:
+                self._fuzzyValues = self._iFile['fuzzyValues'].tolist()
+        if isinstance(self._iFile, str):
+            if _ab._validators.url(self._iFile):
+                data = _ab._urlopen(self._iFile)
+                count = 0
+                for line in data:
+                    line = line.decode("utf-8")
+                    line = line.split("\n")[0]
+                    parts = line.split(":")
+                    parts[0] = parts[0].strip()
+                    parts[1] = parts[1].strip()
+                    items = parts[0].split(self._sep)
+                    quantities = parts[1].split(self._sep)
+                    self._ts.append(int(items[0]))
+                    self._transactions.append([x for x in items[1:]])
+                    self._fuzzyValues.append([float(x) for x in quantities])
+                    count += 1
+            else:
+                try:
+                    with open(self._iFile, 'r', encoding='utf-8') as f:
+                        count = 0
+                        for line in f:
+                            line = line.split("\n")[0]
+                            parts = line.split(":")
+                            parts[0] = parts[0].strip()
+                            parts[1] = parts[1].strip()
+                            items = parts[0].split(self._sep)
+                            quantities = parts[1].split(self._sep)
+                            self._ts.append(int(items[0]))
+                            self._transactions.append([x for x in items[1:]])
+                            self._fuzzyValues.append([float(x) for x in quantities])
+                            count += 1
+                except IOError:
+                    print("File Not Found")
+                    quit()
+
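+# For reference, _creatingItemSets expects each line to carry the timestamp and the
+# items before a ':' and the fuzzy values after it, all split on the chosen separator.
+# A minimal sketch of how one (hypothetical) tab-separated line is parsed:
+#
+#             line = "1\ta\tb:0.4\t0.6"
+#             parts = [p.strip() for p in line.split(":")]
+#             items = parts[0].split("\t")                   # ['1', 'a', 'b']
+#             quantities = parts[1].split("\t")              # ['0.4', '0.6']
+#             ts = int(items[0])                             # timestamp 1
+#             transaction = items[1:]                        # ['a', 'b']
+#             fuzzyValues = [float(x) for x in quantities]   # [0.4, 0.6]
+#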
+[docs] + @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.") + def startMine(self) -> None: + """ + Fuzzy periodic Frequent pattern mining process will start from here + """ + self.mine()
+ + +
+[docs]
+    def mine(self) -> None:
+        """
+        Fuzzy periodic-frequent pattern mining process will start from here
+        """
+        maxTID = 0
+        lastTIDs = {}
+        self._startTime = _ab._time.time()
+        self._creatingItemSets()
+        self._finalPatterns = {}
+        tid = int()
+        for line in range(len(self._transactions)):
+            tid = int(self._ts[line])
+            self._dbLen += 1
+            items = self._transactions[line]
+            quantities = self._fuzzyValues[line]
+            if tid > maxTID:  # track the maximum tid seen so far
+                maxTID = tid
+            for i in range(0, len(items)):
+                item = items[i]
+                if item in self._mapItemSum:
+                    self._mapItemSum[item] += quantities[i]
+                else:
+                    self._mapItemSum[item] = quantities[i]
+        listOfFFIList = []
+        mapItemsToFFLIST = {}
+        # self._minSup = self._convert(self._minSup)
+        self._minSup = float(self._minSup)
+        self._maxPer = self._convert(self._maxPer)
+        for item1 in self._mapItemSum.keys():
+            item = item1
+            if self._mapItemSum[item] >= self._minSup:
+                fUList = _FFList(item)
+                k = tuple([item])
+                mapItemsToFFLIST[k] = fUList
+                listOfFFIList.append(fUList)
+                lastTIDs[item] = tid
+        listOfFFIList.sort(key=_ab._functools.cmp_to_key(self._compareItems))
+        for line in range(len(self._transactions)):
+            tid = int(self._ts[line])
+            items = self._transactions[line]
+            quantities = self._fuzzyValues[line]
+            revisedTransaction = []
+            for i in range(0, len(items)):
+                pair = _Pair()
+                pair.item = items[i]
+                item = pair.item
+                pair.quantity = quantities[i]
+                if self._mapItemSum[item] >= self._minSup:
+                    if pair.quantity > 0:
+                        revisedTransaction.append(pair)
+            revisedTransaction.sort(key=_ab._functools.cmp_to_key(self._compareItems))
+            for i in range(len(revisedTransaction) - 1, -1, -1):
+                pair = revisedTransaction[i]
+                remainUtil = 0
+                for j in range(len(revisedTransaction) - 1, i - 1, -1):
+                    remainUtil += revisedTransaction[j].quantity
+                if pair.quantity > remainUtil:
+                    remainingUtility = pair.quantity
+                else:
+                    remainingUtility = remainUtil
+                if mapItemsToFFLIST.get(tuple([pair.item])) is not None:
+                    FFListOfItem = mapItemsToFFLIST[tuple([pair.item])]
+                    if len(FFListOfItem.elements) == 0:
+                        element = _Element(tid, pair.quantity, remainingUtility, 0)
+                    else:
+                        if lastTIDs[pair.item] == tid:
+                            element = _Element(tid, pair.quantity, remainingUtility, maxTID - tid)
+                        else:
+                            lastTid = FFListOfItem.elements[-1].tid
+                            curPer = tid - lastTid
+                            element = _Element(tid, pair.quantity, remainingUtility, curPer)
+                    FFListOfItem.addElement(element)
+        self._FPFPMining(self._itemSetBuffer, 0, listOfFFIList)
+        self._endTime = _ab._time.time()
+        process = _ab._psutil.Process(_ab._os.getpid())
+        self._memoryUSS = process.memory_full_info().uss
+        self._memoryRSS = process.memory_info().rss
+
+
+    def _FPFPMining(self, prefix, prefixLen, fsFim):
+        """
+        Generates FPFP from prefix
+
+        :param prefix: the prefix patterns of FPFP
+        :type prefix: list
+        :param prefixLen: the length of prefix
+        :type prefixLen: int
+        :param fsFim: the fuzzy list of prefix itemSets
+        :type fsFim: list
+        """
+        for i in range(0, len(fsFim)):
+            X = fsFim[i]
+            if X.sumLUtil >= self._minSup and X.maxPeriod <= self._maxPer:
+                self._WriteOut(prefix, prefixLen, X.item, X.sumLUtil, X.maxPeriod)
+            if X.sumRUtil >= self._minSup:
+                exULs = []
+                for j in range(i + 1, len(fsFim)):
+                    Y = fsFim[j]
+                    exULs.append(self._construct(X, Y))
+                    self._joinsCnt += 1
+                self._itemSetBuffer.insert(prefixLen, X.item)
+                self._FPFPMining(self._itemSetBuffer, prefixLen + 1, exULs)
+
+[docs] + def getMemoryUSS(self) -> float: + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self) -> float: + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self) -> float: + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + return self._endTime - self._startTime
+
+
+    def _construct(self, px: _FFList, py: _FFList) -> _FFList:
+        """
+        A function to construct a new fuzzy item set from 2 fuzzy itemSets
+
+        :param px: the item set px
+        :type px: FFI-List
+        :param py: the item set py
+        :type py: FFI-List
+        :return: the item set of pxy (px and py)
+        :rtype: FFI-List
+        """
+        pxyUL = _FFList(py.item)
+        prev = 0
+        for ex in px.elements:
+            ey = self._findElementWithTID(py, ex.tid)
+            if ey is None:
+                continue
+            eXY = _Element(ex.tid, min([ex.lUtils, ey.lUtils], key=lambda x: float(x)), ey.rUtils, ex.tid - prev)
+            pxyUL.addElement(eXY)
+            prev = ex.tid
+        return pxyUL
+
+    def _findElementWithTID(self, UList, tid) -> _Element:
+        """
+        To find the element with the same tid as given
+
+        :param UList: fuzzy list
+        :type UList: FFI-List
+        :param tid: transaction id
+        :type tid: int
+        :return: element with the given tid
+        :rtype: element if it exists or None
+        """
+        List = UList.elements
+        first = 0
+        last = len(List) - 1
+        while first <= last:
+            mid = (first + last) >> 1
+            if List[mid].tid < tid:
+                first = mid + 1
+            elif List[mid].tid > tid:
+                last = mid - 1
+            else:
+                return List[mid]
+        return None
+
+    def _WriteOut(self, prefix: List[int], prefixLen: int, item: int, sumLUtil: float, period: int) -> None:
+        """
+        To store the pattern
+
+        :param prefix: prefix of itemSet
+        :type prefix: list
+        :param prefixLen: length of prefix
+        :type prefixLen: int
+        :param item: the last item
+        :type item: int
+        :param sumLUtil: sum of utility of itemSet
+        :type sumLUtil: float
+        :param period: represents the period of itemSet
+        :type period: int
+        :return: None
+        """
+        self._itemsCnt += 1
+        res = ""
+        for i in range(0, prefixLen):
+            res += str(prefix[i]) + "\t"
+        res += str(item)
+        #res1 = str(sumLUtil) + " : " + str(period)
+        self._finalPatterns[res] = [sumLUtil, period]
+
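+# _findElementWithTID assumes the elements of an FFI-list are kept in tid order, so a
+# binary search suffices. A standalone sketch of the same search over (tid, value)
+# tuples (toy data; names are illustrative only):
+#
+#             def find_with_tid(elements, tid):
+#                 first, last = 0, len(elements) - 1
+#                 while first <= last:
+#                     mid = (first + last) >> 1
+#                     if elements[mid][0] < tid:
+#                         first = mid + 1
+#                     elif elements[mid][0] > tid:
+#                         last = mid - 1
+#                     else:
+#                         return elements[mid]
+#                 return None
+#
+#             find_with_tid([(1, 0.4), (3, 0.6), (7, 0.2)], 3)   # -> (3, 0.6)
+#             find_with_tid([(1, 0.4), (3, 0.6), (7, 0.2)], 5)   # -> None
+#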
+[docs] + def getPatternsAsDataFrame(self) -> _ab._pd.DataFrame: + """ + Storing final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataFrame = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a.replace('\t', ' '), b[0], b[1]]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support', 'Periodicity']) + return dataFrame
+ + +
+[docs] + def getPatterns(self) -> Dict[str, str]: + """ + Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs]
+    def save(self, outFile: str) -> None:
+        """
+        Complete set of frequent patterns will be loaded into an output file
+
+        :param outFile: name of the output file
+        :type outFile: csv file
+        :return: None
+        """
+        self._oFile = outFile
+        writer = open(self._oFile, 'w+')
+        for x, y in self._finalPatterns.items():
+            patternsAndSupport = x.strip() + ":" + str(y[0]) + ":" + str(y[1])
+            writer.write("%s \n" % patternsAndSupport)
+        writer.close()  # release the file handle
+ + +
+[docs] + def printResults(self) -> None: + """ + This function is used to print the results + """ + print("Total number of Fuzzy Periodic-Frequent Patterns:", len(self.getPatterns())) + print("Total Memory in USS:", self.getMemoryUSS()) + print("Total Memory in RSS", self.getMemoryRSS()) + print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_ab._sys.argv) == 5 or len(_ab._sys.argv) == 6:
+        if len(_ab._sys.argv) == 6:  # to include a user specified separator
+            _ap = FPFPMiner(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5])
+        if len(_ab._sys.argv) == 5:  # to consider "\t" as a separator
+            _ap = FPFPMiner(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4])
+        _ap.mine()
+        print("Total number of Fuzzy Periodic-Frequent Patterns:", len(_ap.getPatterns()))
+        _ap.save(_ab._sys.argv[2])
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        # sample run kept for reference: _ap = FPFPMiner('sample.txt', 1, 10, ' '); _ap.mine(); _ap.printResults(); _ap.save('output.txt')
+        print("Error! The number of input parameters does not match the total number of parameters provided")
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/fuzzyPeriodicFrequentPattern/basic/FPFPMiner_old.html b/sphinx/_build/html/_modules/PAMI/fuzzyPeriodicFrequentPattern/basic/FPFPMiner_old.html new file mode 100644 index 000000000..73e741947 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/fuzzyPeriodicFrequentPattern/basic/FPFPMiner_old.html @@ -0,0 +1,889 @@ + + + + + + PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner_old — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner_old

+# Sample run of importing the code:
+# -------------------------------------
+#
+#             from PAMI.fuzzyPeriodicFrequentPattern.basic import FPFPMiner as alg
+#
+#             obj = alg.FPFPMiner("input.txt", 2, 3)
+#
+#             obj.mine()
+#
+#             periodicFrequentPatterns = obj.getPatterns()
+#
+#             print("Total number of Fuzzy Periodic Frequent Patterns:", len(periodicFrequentPatterns))
+#
+#             obj.save("output.txt")
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+
+from PAMI.fuzzyPeriodicFrequentPattern.basic import abstract as _ab
+from deprecated import deprecated
+
+
+class _FFList:
+    """
+    A class that represents the fuzzy list of an element
+
+    :Attributes:
+
+        item : int
+            the item name
+        sumLUtil : float
+            the sum of utilities of a fuzzy item in database
+        sumRUtil : float
+            the sum of resting values of a fuzzy item in database
+        elements : list
+            list of elements containing the tid, utility, and resting value of the element in each transaction
+        maxPeriod : int
+            it represents the maximum period of an item
+
+    :Methods:
+
+        addElement(element)
+            Method to add an element to this fuzzy list and update the sums at the same time.
+        printElement(e)
+            Method to print elements
+
+    """
+
+    def __init__(self, itemName):
+        self.item = itemName
+        self.sumLUtil = 0.0
+        self.sumRUtil = 0.0
+        self.elements = []
+        self.maxPeriod = 0
+
+    def addElement(self, element):
+        """
+        A method that adds a new element to the FFList
+
+        :param element: an element to be added to FFList
+        :type element: Element
+        """
+        self.sumLUtil += element.lUtils
+        self.sumRUtil += element.rUtils
+        self.elements.append(element)
+        self.maxPeriod = max(self.maxPeriod, element.period)
+
+    def printElement(self):
+        """
+        A method to print the elements in the FFList
+        """
+        for ele in self.elements:
+            print(ele.tid, ele.lUtils, ele.rUtils, ele.period)
+
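+# addElement keeps sumLUtil, sumRUtil and maxPeriod in step with the elements list, so
+# callers never recompute them. A small sketch with toy values (_Element is defined just below):
+#
+#             fl = _FFList("a")
+#             fl.addElement(_Element(1, 0.4, 0.6, 1))
+#             fl.addElement(_Element(4, 0.5, 0.2, 3))
+#             print(fl.sumLUtil, fl.sumRUtil, fl.maxPeriod)   # 0.9 0.8 3
+#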
+
+class _Element:
+    """
+    A class that represents an element of a fuzzy list
+
+    :Attributes:
+
+        tid : int
+            keeps track of the transaction id
+        lUtils : float
+            the utility of a fuzzy item in the transaction
+        rUtils : float
+            the resting value of a fuzzy item in the transaction
+        period : int
+            represent the period of the element
+    """
+
+    def __init__(self, tid, iUtil, rUtil, period):
+        self.tid = tid
+        self.lUtils = iUtil
+        self.rUtils = rUtil
+        self.period = period
+
+
+class _Regions:
+    """
+    A class to calculate the fuzzy regions
+
+    :Attributes:
+
+        low : int
+            low region value
+        middle : int
+            middle region value
+        high : int
+            high region value
+        """
+
+    def __init__(self, quantity, regionsNumber):
+        self.low = 0
+        self.middle = 0
+        self.high = 0
+        if regionsNumber == 3:  # if we have 3 regions
+            if 0 < quantity <= 1:
+                self.low = 1
+                self.high = 0
+                self.middle = 0
+            elif 1 < quantity <= 6:
+                self.low = float((6 - quantity) / 5)
+                self.middle = float((quantity - 1) / 5)
+                self.high = 0
+            elif 6 < quantity <= 11:
+                self.low = 0
+                self.middle = float((11 - quantity) / 5)
+                self.high = float((quantity - 6) / 5)
+            else:
+                self.low = 0
+                self.middle = 0
+                self.high = 1
+
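+# With three regions the membership functions are triangular and, for any quantity
+# greater than 0, the memberships sum to 1: low covers quantities up to 6, middle
+# covers (1, 11], and high covers everything above 6. Worked values that follow
+# directly from the formulas above:
+#
+#             _Regions(4, 3)    # low = (6-4)/5 = 0.4, middle = (4-1)/5 = 0.6, high = 0
+#             _Regions(8, 3)    # low = 0, middle = (11-8)/5 = 0.6, high = (8-6)/5 = 0.4
+#             _Regions(12, 3)   # low = 0, middle = 0, high = 1
+#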
+
+class _Pair:
+    """
+    A class to store item name and quantity together.
+    """
+
+    def __init__(self):
+        self.item = 0
+        self.quantity = 0
+
+
+
+[docs]
+class FPFPMiner(_ab._fuzzyPeriodicFrequentPatterns):
+    """
+    :Description: Fuzzy Periodic Frequent Pattern Miner is designed to find all fuzzy periodic-frequent patterns. This is
+                  a non-trivial and challenging problem due to its huge search space, so efficient pruning
+                  techniques are used to reduce the search space.
+
+    :Reference:
+
+
+    :param iFile: str :
+                  Name of the Input file to mine complete set of frequent patterns
+    :param oFile: str :
+                  Name of the output file to store complete set of frequent patterns
+    :param minSup: int or float or str :
+                   The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup as expressed in count. Otherwise, it will be treated as float.
+    :param maxPer: float :
+                   The user can specify maxPer in count or proportion of database size. If the program detects the data type of maxPer is integer, then it treats maxPer as expressed in count.
+    :param sep: str :
+                This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, users can override the default separator.
+
+
+    :Attributes:
+
+        iFile : file
+            Name of the input file to mine complete set of fuzzy spatial frequent patterns
+        oFile : file
+            Name of the output file to store the complete set of fuzzy spatial frequent patterns
+        minSup : float
+            The user given support
+        period : int
+            periodicity of an element
+        memoryRSS : float
+            To store the total amount of RSS memory consumed by the program
+        startTime : float
+            To record the start time of the mining process
+        endTime : float
+            To record the completion time of the mining process
+        itemsCnt : int
+            To record the number of fuzzy spatial itemSets generated
+        mapItemsLowSum : map
+            To keep track of low region values of items
+        mapItemsMidSum : map
+            To keep track of middle region values of items
+        mapItemsHighSum : map
+            To keep track of high region values of items
+        mapItemSum : map
+            To keep track of sum of fuzzy values of items
+        mapItemRegions : map
+            To keep track of fuzzy regions of items
+        joinsCnt : int
+            To keep track of the number of FFI-lists that were constructed
+        BufferSize : int
+            represents the size of the buffer
+        itemSetBuffer : list
+            to keep track of items in the buffer
+        maxTID : int
+            represents the maximum tid of the database
+        lastTIDs : map
+            represents the last tid of fuzzy items
+        itemsToRegion : map
+            represents items with their respective regions
+
+    :Methods:
+
+        mine()
+            Mining process will start from here
+        getPatterns()
+            Complete set of patterns will be retrieved with this function
+        save(oFile)
+            Complete set of frequent patterns will be loaded into an output file
+        getPatternsAsDataFrame()
+            Complete set of frequent patterns will be loaded into a dataframe
+        getMemoryUSS()
+            Total amount of USS memory consumed by the mining process will be retrieved from this function
+        getMemoryRSS()
+            Total amount of RSS memory consumed by the mining process will be retrieved from this function
+        getRuntime()
+            Total amount of runtime taken by the mining process will be retrieved from this function
+        convert(value)
+            To convert the given user specified value
+        FSFIMining(prefix, prefixLen, fsFim, minSup)
+            Method to generate FFI from prefix
+        construct(px, py)
+            A function to construct a fuzzy itemSet from 2 fuzzy itemSets
+        findElementWithTID(UList, tid)
+            To find the element with the same tid as given
+        WriteOut(prefix, prefixLen, item, sumIUtil, period)
+            To store the pattern
+
+    **Executing the code on terminal :**
+    ---------------------------------------
+
+    .. code-block:: console
+
+      Format:
+
+      (.venv) $ python3 FPFPMiner_old.py <inputFile> <outputFile> <minSup> <maxPer> <sep>
+
+      Example Usage:
+
+      (.venv) $ python3 FPFPMiner_old.py sampleTDB.txt output.txt 2 3
+
+    .. note:: minSup will be considered in percentage of database transactions
+
+
+    **Sample run of importing the code:**
+    --------------------------------------
+
+        from PAMI.fuzzyPeriodicFrequentPattern.basic import FPFPMiner as alg
+
+        obj = alg.FPFPMiner("input.txt", 2, 3)
+
+        obj.mine()
+
+        periodicFrequentPatterns = obj.getPatterns()
+
+        print("Total number of Fuzzy Periodic Frequent Patterns:", len(periodicFrequentPatterns))
+
+        obj.save("output.txt")
+
+        memUSS = obj.getMemoryUSS()
+
+        print("Total Memory in USS:", memUSS)
+
+        memRSS = obj.getMemoryRSS()
+
+        print("Total Memory in RSS", memRSS)
+
+        run = obj.getRuntime()
+
+        print("Total ExecutionTime in seconds:", run)
+
+    **Credits:**
+    ---------------
+    The complete program was written by Sai Chitra.B under the supervision of Professor Rage Uday Kiran.
+
+    """
+    _startTime = float()
+    _endTime = float()
+    _minSup = str()
+    _maxPer = float()
+    _finalPatterns = {}
+    _iFile = " "
+    _oFile = " "
+    _memoryUSS = float()
+    _memoryRSS = float()
+    _sep = " "
+    _Database = []
+    _transactions = []
+    _fuzzyValues = []
+    _ts = []
+
+    def __init__(self, iFile, minSup, period, sep="\t"):
+        super().__init__(iFile, minSup, period, sep)
+        self._oFile = ""
+        self._BufferSize = 200
+        self._itemSetBuffer = []
+        self._mapItemRegions = {}
+        self._mapItemSum = {}
+        self._mapItemsHighSum = {}
+        self._finalPatterns = {}
+        self._joinsCnt = 0
+        self._itemsCnt = 0
+        self._mapItemMidSum = {}
+        self._startTime = float()
+        self._endTime = float()
+        self._mapItemsLowSum = {}
+        self._memoryUSS = float()
+        self._memoryRSS = float()
+        self._dbLen = 0
+
+    def _compareItems(self, o1, o2):
+        """
+        A function that sorts all FFI-lists in ascending order of support
+
+        :param o1: First FFI-list
+        :type o1: _FFList
+        :param o2: Second FFI-list
+        :type o2: _FFList
+        :return: Comparison value
+        :rtype: int
+        """
+        compare = self._mapItemSum[o1.item] - self._mapItemSum[o2.item]
+        if compare == 0:
+            return int(o1.item) - int(o2.item)
+        else:
+            return compare
+
+    def _convert(self, value):
+        """
+        To convert the given user specified value
+
+        :param value: user specified value
+        :type value: int or float or str
+        :return: converted value
+        :rtype: float
+        """
+        if type(value) is int:
+            value = int(value)
+        if type(value) is float:
+            value = (self._dbLen * value)
+        if type(value) is str:
+            if '.' in value:
+                value = (self._dbLen * float(value))
+            else:
+                value = int(value)
+        return value
+
+    def _creatingItemSets(self):
+        """
+        Storing the complete transactions of the database/input file in a database variable
+        """
+        data, self._transactions, self._fuzzyValues, ts = [], [], [], []
+        if isinstance(self._iFile, _ab._pd.DataFrame):
+            if self._iFile.empty:
+                print("its empty..")
+            i = self._iFile.columns.values.tolist()
+            if 'TS' in i:
+                self._ts = self._iFile['TS'].tolist()
+            if 'Transactions' in i:
+                self._transactions = self._iFile['Transactions'].tolist()
+            if 'fuzzyValues' in i:
+                self._fuzzyValues = self._iFile['fuzzyValues'].tolist()
+        if isinstance(self._iFile, str):
+            if _ab._validators.url(self._iFile):
+                data = _ab._urlopen(self._iFile)
+                count = 0
+                for line in data:
+                    line = line.decode("utf-8")
+                    line = line.split("\n")[0]
+                    parts = line.split(":")
+                    parts[0] = parts[0].strip()
+                    parts[2] = parts[2].strip()
+                    items = parts[0].split(self._sep)
+                    quantities = parts[2].split(self._sep)
+                    self._ts.append(count)
+                    self._transactions.append([x for x in items])
+                    self._fuzzyValues.append([x for x in quantities])
+                    count += 1
+            else:
+                try:
+                    with open(self._iFile, 'r', encoding='utf-8') as f:
+                        count = 0
+                        for line in f:
+                            line = line.split("\n")[0]
+                            parts = line.split(":")
+                            parts[0] = parts[0].strip()
+                            parts[2] = parts[2].strip()
+                            items = parts[0].split(self._sep)
+                            quantities = parts[2].split(self._sep)
+                            self._ts.append(count)
+                            self._transactions.append([x for x in items])
+                            self._fuzzyValues.append([x for x in quantities])
+                            count += 1
+                except IOError:
+                    print("File Not Found")
+                    quit()
+
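+# Note that, unlike the newer reader, this variant expects three ':'-separated fields
+# per line, taking items from the first and quantities from the third while ignoring
+# the middle field; timestamps are just the running line count. A hypothetical line
+# and its parse:
+#
+#             line = "a\tb\tc:10:4\t6\t3"
+#             parts = [p.strip() for p in line.split(":")]
+#             items = parts[0].split("\t")       # ['a', 'b', 'c']
+#             quantities = parts[2].split("\t")  # ['4', '6', '3']
+#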
+[docs] + @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.") + def startMine(self): + """ + Fuzzy periodic Frequent pattern mining process will start from here + """ + self.mine()
+ + +
+[docs]
+    def mine(self):
+        """
+        Fuzzy periodic-frequent pattern mining process will start from here
+        """
+        maxTID = 0
+        lastTIDs = {}
+        self._startTime = _ab._time.time()
+        self._creatingItemSets()
+        self._finalPatterns = {}
+        tid = int()
+        for line in range(len(self._transactions)):
+            tid = int(self._ts[line])
+            self._dbLen += 1
+            items = self._transactions[line]
+            quantities = self._fuzzyValues[line]
+            if tid > maxTID:  # track the maximum tid seen so far
+                maxTID = tid
+            for i in range(0, len(items)):
+                regions = _Regions(int(quantities[i]), 3)
+                item = items[i]
+                if item in self._mapItemsLowSum.keys():
+                    low = self._mapItemsLowSum[item]
+                    low += regions.low
+                    self._mapItemsLowSum[item] = low
+                else:
+                    self._mapItemsLowSum[item] = regions.low
+                if item in self._mapItemMidSum.keys():
+                    mid = self._mapItemMidSum[item]
+                    mid += regions.middle
+                    self._mapItemMidSum[item] = mid
+                else:
+                    self._mapItemMidSum[item] = regions.middle
+                if item in self._mapItemsHighSum.keys():
+                    high = self._mapItemsHighSum[item]
+                    high += regions.high
+                    self._mapItemsHighSum[item] = high
+                else:
+                    self._mapItemsHighSum[item] = regions.high
+        listOfFFIList = []
+        mapItemsToFFLIST = {}
+        itemsToRegion = {}
+        self._minSup = self._convert(self._minSup)
+        self._maxPer = self._convert(self._maxPer)
+        for item1 in self._mapItemsLowSum.keys():
+            item = item1
+            low = self._mapItemsLowSum[item]
+            mid = self._mapItemMidSum[item]
+            high = self._mapItemsHighSum[item]
+            if low >= mid and low >= high:
+                self._mapItemSum[item] = low
+                self._mapItemRegions[item] = "L"
+                itemsToRegion[item] = "L"
+            elif mid >= low and mid >= high:
+                self._mapItemSum[item] = mid
+                self._mapItemRegions[item] = "M"
+                itemsToRegion[item] = "M"
+            elif high >= low and high >= mid:
+                self._mapItemRegions[item] = "H"
+                self._mapItemSum[item] = high
+                itemsToRegion[item] = "H"
+            if self._mapItemSum[item] >= self._minSup:
+                fUList = _FFList(item)
+                k = tuple([item, itemsToRegion.get(item)])
+                mapItemsToFFLIST[k] = fUList
+                listOfFFIList.append(fUList)
+                lastTIDs[item] = tid
+        listOfFFIList.sort(key=_ab._functools.cmp_to_key(self._compareItems))
+        for line in range(len(self._transactions)):
+            tid = int(self._ts[line])
+            items = self._transactions[line]
+            quantities = self._fuzzyValues[line]
+            revisedTransaction = []
+            for i in range(0, len(items)):
+                pair = _Pair()
+                pair.item = items[i]
+                regions = _Regions(int(quantities[i]), 3)
+                item = pair.item
+                if self._mapItemSum[item] >= self._minSup:
+                    if self._mapItemRegions[pair.item] == "L":
+                        pair.quantity = regions.low
+                    elif self._mapItemRegions[pair.item] == "M":
+                        pair.quantity = regions.middle
+                    elif self._mapItemRegions[pair.item] == "H":
+                        pair.quantity = regions.high
+                    if pair.quantity > 0:
+                        revisedTransaction.append(pair)
+            revisedTransaction.sort(key=_ab._functools.cmp_to_key(self._compareItems))
+            for i in range(len(revisedTransaction) - 1, -1, -1):
+                pair = revisedTransaction[i]
+                remainUtil = 0
+                for j in range(len(revisedTransaction) - 1, i - 1, -1):
+                    remainUtil += revisedTransaction[j].quantity
+                if pair.quantity > remainUtil:
+                    remainingUtility = pair.quantity
+                else:
+                    remainingUtility = remainUtil
+                if mapItemsToFFLIST.get(tuple([pair.item, itemsToRegion[pair.item]])) is not None:
+                    FFListOfItem = mapItemsToFFLIST[tuple([pair.item, itemsToRegion[pair.item]])]
+                    if len(FFListOfItem.elements) == 0:
+                        element = _Element(tid, pair.quantity, remainingUtility, 0)
+                    else:
+                        if lastTIDs[pair.item] == tid:
+                            element = _Element(tid, pair.quantity, remainingUtility, maxTID - tid)
+                        else:
+                            lastTid = FFListOfItem.elements[-1].tid
+                            curPer = tid - lastTid
+                            element = _Element(tid, pair.quantity, remainingUtility, curPer)
+                    FFListOfItem.addElement(element)
+        self._FSFIMining(self._itemSetBuffer, 0, listOfFFIList, self._minSup)
+        self._endTime = _ab._time.time()
+        process = _ab._psutil.Process(_ab._os.getpid())
+        self._memoryUSS = process.memory_full_info().uss
+        self._memoryRSS = process.memory_info().rss
+
+
+    def _FSFIMining(self, prefix, prefixLen, fsFim, minSup):
+        """
+        Generates FPFP from prefix
+
+        :param prefix: the prefix patterns of FPFP
+        :type prefix: list
+        :param prefixLen: the length of prefix
+        :type prefixLen: int
+        :param fsFim: the fuzzy list of prefix itemSets
+        :type fsFim: list
+        :param minSup: the minimum support
+        :type minSup: int
+        """
+        for i in range(0, len(fsFim)):
+            X = fsFim[i]
+            if X.sumLUtil >= minSup and X.maxPeriod <= self._maxPer:
+                self._WriteOut(prefix, prefixLen, X.item, X.sumLUtil, X.maxPeriod)
+            if X.sumRUtil >= minSup:
+                exULs = []
+                for j in range(i + 1, len(fsFim)):
+                    Y = fsFim[j]
+                    exULs.append(self._construct(X, Y))
+                    self._joinsCnt += 1
+                self._itemSetBuffer.insert(prefixLen, X.item)
+                self._FSFIMining(self._itemSetBuffer, prefixLen + 1, exULs, minSup)
+
+[docs] + def getMemoryUSS(self): + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self): + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self): + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + return self._endTime - self._startTime
+
+
+    def _construct(self, px, py):
+        """
+        A function to construct a new fuzzy item set from 2 fuzzy itemSets
+
+        :param px: the item set px
+        :type px: FFI-List
+        :param py: the item set py
+        :type py: FFI-List
+        :return: the item set of pxy (px and py)
+        :rtype: FFI-List
+        """
+        pxyUL = _FFList(py.item)
+        prev = 0
+        for ex in px.elements:
+            ey = self._findElementWithTID(py, ex.tid)
+            if ey is None:
+                continue
+            eXY = _Element(ex.tid, min([ex.lUtils, ey.lUtils], key=lambda x: float(x)), ey.rUtils, ex.tid - prev)
+            pxyUL.addElement(eXY)
+            prev = ex.tid
+        return pxyUL
+
+    def _findElementWithTID(self, UList, tid):
+        """
+        To find the element with the same tid as given
+
+        :param UList: fuzzy list
+        :type UList: FFI-List
+        :param tid: transaction id
+        :type tid: int
+        :return: element with the given tid
+        :rtype: element if it exists or None
+        """
+        List = UList.elements
+        first = 0
+        last = len(List) - 1
+        while first <= last:
+            mid = (first + last) >> 1
+            if List[mid].tid < tid:
+                first = mid + 1
+            elif List[mid].tid > tid:
+                last = mid - 1
+            else:
+                return List[mid]
+        return None
+
+    def _WriteOut(self, prefix, prefixLen, item, sumLUtil, period):
+        """
+        To store the pattern
+
+        :param prefix: prefix of itemSet
+        :type prefix: list
+        :param prefixLen: length of prefix
+        :type prefixLen: int
+        :param item: the last item
+        :type item: int
+        :param sumLUtil: sum of utility of itemSet
+        :type sumLUtil: float
+        :param period: represents the period of itemSet
+        :type period: int
+        """
+        self._itemsCnt += 1
+        res = ""
+        for i in range(0, prefixLen):
+            res += str(prefix[i]) + "." + str(self._mapItemRegions[prefix[i]]) + "\t"
+        res += str(item) + "." + str(self._mapItemRegions.get(item))
+        #res1 = str(sumLUtil) + " : " + str(period)
+        self._finalPatterns[res] = [sumLUtil, period]
+
+[docs] + def getPatternsAsDataFrame(self): + """ + Storing final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataFrame = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a.replace('\t', ' '), b[0], b[1]]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support', 'Periodicity']) + return dataFrame
+ + +
+[docs] + def getPatterns(self): + """ + Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs]
+    def save(self, outFile):
+        """
+        Complete set of frequent patterns will be loaded into an output file
+
+        :param outFile: name of the output file
+        :type outFile: csv file
+        """
+        self._oFile = outFile
+        writer = open(self._oFile, 'w+')
+        for x, y in self._finalPatterns.items():
+            patternsAndSupport = x.strip() + ":" + str(y[0]) + ":" + str(y[1])
+            writer.write("%s \n" % patternsAndSupport)
+        writer.close()  # release the file handle
+ + +
+[docs] + def printResults(self): + """ + This function is used to print the results + """ + print("Total number of Fuzzy Periodic-Frequent Patterns:", len(self.getPatterns())) + print("Total Memory in USS:", self.getMemoryUSS()) + print("Total Memory in RSS", self.getMemoryRSS()) + print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_ab._sys.argv) == 5 or len(_ab._sys.argv) == 6:
+        if len(_ab._sys.argv) == 6:  # to include a user specified separator
+            _ap = FPFPMiner(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5])
+        if len(_ab._sys.argv) == 5:  # to consider "\t" as a separator
+            _ap = FPFPMiner(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4])
+        _ap.mine()
+        print("Total number of Fuzzy Periodic-Frequent Patterns:", len(_ap.getPatterns()))
+        _ap.save(_ab._sys.argv[2])
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        print("Error! The number of input parameters does not match the total number of parameters provided")
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/geoReferencedPeriodicFrequentPattern/basic/GPFPMiner.html b/sphinx/_build/html/_modules/PAMI/geoReferencedPeriodicFrequentPattern/basic/GPFPMiner.html new file mode 100644 index 000000000..1423ae8c2 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/geoReferencedPeriodicFrequentPattern/basic/GPFPMiner.html @@ -0,0 +1,719 @@ + + + + + + PAMI.geoReferencedPeriodicFrequentPattern.basic.GPFPMiner — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for PAMI.geoReferencedPeriodicFrequentPattern.basic.GPFPMiner

+# GPFPMiner is an extension of the ECLAT algorithm, which stands for Equivalence Class Clustering and bottom-up
+# Lattice Traversal, used to mine the geo-referenced periodic frequent patterns.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#
+#             from PAMI.geoReferencedPeriodicFrequentPattern.basic import GPFPMiner as alg
+#
+#             obj = alg.GPFPMiner("sampleTDB.txt", "sampleN.txt", 5, 3)
+#
+#             obj.mine()
+#
+#             Patterns = obj.getPatterns()
+#
+#             print("Total number of Geo Referenced Periodic-Frequent Patterns:", len(Patterns))
+#
+#             obj.save("outFile")
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+from  PAMI.geoReferencedPeriodicFrequentPattern.basic import abstract as _ab
+from deprecated import deprecated
+
+
+
+[docs]
+class GPFPMiner(_ab._geoReferencedPeriodicFrequentPatterns):
+    """
+    :Description: GPFPMiner is an extension of the ECLAT algorithm, which stands for Equivalence Class Clustering and
+                  bottom-up Lattice Traversal, used to mine the geo-referenced periodic frequent patterns.
+
+    :Reference:
+
+    :param iFile: str
+                  Name of the Input file to mine complete set of Geo-referenced periodic frequent patterns
+    :param oFile: str
+                  Name of the output file to store complete set of Geo-referenced periodic frequent patterns
+    :param minSup: int or float or str
+                   The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup as expressed in count. Otherwise, it will be treated as float.
+    :param maxPer: float
+                   The user can specify maxPer in count or proportion of database size. If the program detects the data type of maxPer is integer, then it treats maxPer as expressed in count.
+    :param nFile: str
+                  Name of the input file to mine complete set of Geo-referenced periodic frequent patterns
+    :param sep: str
+                This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, users can override the default separator.
+
+
+    :Attributes:
+
+        iFile : str
+            Input file name or path of the input file
+        nFile : str
+            Name of Neighbourhood file name
+        minSup : float or int or str
+            The user can specify minSup either in count or proportion of database size.
+            If the program detects the data type of minSup is integer, then it treats minSup as expressed in count.
+            Otherwise, it will be treated as float.
+            Example: minSup=10 will be treated as integer, while minSup=10.0 will be treated as float
+        maxPer : float or int or str
+            The user can specify maxPer either in count or proportion of database size.
+            If the program detects the data type of maxPer is integer, then it treats maxPer as expressed in count.
+            Otherwise, it will be treated as float.
+            Example: maxPer=10 will be treated as integer, while maxPer=10.0 will be treated as float
+        sep : str
+            This variable is used to distinguish items from one another in a transaction. The default separator is tab space or \t.
+            However, users can override the default separator.
+        startTime : float
+            To record the start time of the mining process
+        endTime : float
+            To record the completion time of the mining process
+        finalPatterns : dict
+            Storing the complete set of patterns in a dictionary variable
+        oFile : str
+            Name of the output file to store complete set of frequent patterns
+        memoryUSS : float
+            To store the total amount of USS memory consumed by the program
+        memoryRSS : float
+            To store the total amount of RSS memory consumed by the program
+        Database : list
+            To store the complete set of transactions available in the input database/file
+
+    :Methods:
+
+        mine()
+            Mining process will start from here
+        getPatterns()
+            Complete set of patterns will be retrieved with this function
+        save(oFile)
+            Complete set of frequent patterns will be loaded into an output file
+        getPatternsAsDataFrame()
+            Complete set of frequent patterns will be loaded into a dataframe
+        getMemoryUSS()
+            Total amount of USS memory consumed by the mining process will be retrieved from this function
+        getMemoryRSS()
+            Total amount of RSS memory consumed by the mining process will be retrieved from this function
+        getRuntime()
+            Total amount of runtime taken by the mining process will be retrieved from this function
+        creatingItemSets(iFileName)
+            Storing the complete transactions of the database/input file in a database variable
+        frequentOneItem()
+            Generating frequent patterns of length one
+        convert(value)
+            To convert the given user specified value
+        getNeighbourItems(keySet)
+            A function to get common neighbours of an itemSet
+        mapNeighbours(file)
+            A function to map items to their neighbours
+
+    **Executing the code on terminal :**
+    ----------------------------------------
+
+    .. code-block:: console
+
+      Format:
+
+      (.venv) $ python3 GPFPMiner.py <inputFile> <outputFile> <neighbourFile> <minSup> <maxPer>
+
+      Example Usage:
+
+      (.venv) $ python3 GPFPMiner.py sampleTDB.txt output.txt sampleN.txt 0.5 0.3
+
+    .. note:: minSup & maxPer will be considered in percentage of database transactions
+
+
+
+    **Sample run of importing the code :**
+    -----------------------------------------
+    .. code-block:: python
+
+        from PAMI.geoReferencedPeriodicFrequentPattern.basic import GPFPMiner as alg
+
+        obj = alg.GPFPMiner("sampleTDB.txt", "sampleN.txt", 5, 3)
+
+        obj.mine()
+
+        Patterns = obj.getPatterns()
+
+        print("Total number of Geo Referenced Periodic-Frequent Patterns:", len(Patterns))
+
+        obj.save("outFile")
+
+        memUSS = obj.getMemoryUSS()
+
+        print("Total Memory in USS:", memUSS)
+
+        memRSS = obj.getMemoryRSS()
+
+        print("Total Memory in RSS", memRSS)
+
+        run = obj.getRuntime()
+
+        print("Total ExecutionTime in seconds:", run)
+
+    **Credits:**
+    --------------
+    The complete program was written by P.RaviKumar under the supervision of Professor Rage Uday Kiran.
+ """ + + _minSup = " " + _maxPer = " " + _startTime = float() + _endTime = float() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _nFile = " " + _memoryUSS = float() + _memoryRSS = float() + _Database = [] + _sep = "\t" + _lno = 0 + + def __init__(self, iFile, nFile, minSup, maxPer, sep="\t"): + super().__init__(iFile, nFile, minSup, maxPer, sep) + self._NeighboursMap = {} + + def _creatingItemSets(self): + """ + Storing the complete transactions of the database/input file in a database variable + """ + self._Database = [] + if isinstance(self._iFile, _ab._pd.DataFrame): + data, ts = [], [] + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'TS' in i: + ts = self._iFile['TS'].tolist() + if 'Transactions' in i: + data = self._iFile['Transactions'].tolist() + for i in range(len(data)): + tr = [ts[i][0]] + tr = tr + data[i] + self._Database.append(tr) + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line = line.rstrip() + temp = [i.strip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + except IOError: + print("File Not Found") + quit() + + # function to get frequent one pattern + def _frequentOneItem(self): + """ + Generating one frequent patterns + """ + + candidate = {} + for i in self._Database: + self._lno += 1 + n = int(i[0]) + for j in i[1:]: + if j not in candidate: + candidate[j] = [1, abs(0-n), n, [n]] + else: + candidate[j][0] += 1 + candidate[j][1] = max(candidate[j][1], abs(n - candidate[j][2])) + candidate[j][2] = n + candidate[j][3].append(n) + self._minSup = self._convert(self._minSup) + self._maxPer = self._convert(self._maxPer) + #print(self._minSup, self._maxPer) + self._tidList = {k: v[3] for k, v in candidate.items() if v[0] >= self._minSup and v[1] <= self._maxPer} + candidate = {k: [v[0], v[1]] for k, v in candidate.items() if v[0] >= self._minSup and v[1] <= self._maxPer} + plist = [key for key, value in sorted(candidate.items(), key=lambda x: (x[1][0], x[0]), reverse=True)] + return plist + + def _convert(self, value): + """ + To convert the given user specified value + + :param value: user specified value + + :type value: int or float or str + + :return: converted value + + :rtype: float + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self._Database) * value) + if type(value) is str: + if '.' in value: + value = float(value) + value = (len(self._Database) * value) + else: + value = int(value) + return value + + def _getSupportAndPeriod(self, timeStamps): + """ + calculates the support and periodicity with list of timestamps + + :param timeStamps: timestamps of a pattern + :type timeStamps: list + """ + timeStamps.sort() + cur = 0 + per = 0 + sup = 0 + for j in range(len(timeStamps)): + per = max(per, timeStamps[j] - cur) + if per > self._maxPer: + return [0, 0] + cur = timeStamps[j] + sup += 1 + per = max(per, self._lno - cur) + return [sup, per] + + def _save(self, prefix, suffix, tidSetX): + """ + Saves the patterns that satisfy the periodic frequent property. 
+
+        :param prefix: the prefix of a pattern
+        :type prefix: list or None
+        :param suffix: the suffix of a pattern
+        :type suffix: list
+        :param tidSetX: the timestamps of a pattern
+        :type tidSetX: list
+        """
+        if prefix is None:
+            prefix = suffix
+        else:
+            prefix = prefix + suffix
+        val = self._getSupportAndPeriod(tidSetX)
+        if val[0] >= self._minSup and val[1] <= self._maxPer:
+            self._finalPatterns[tuple(prefix)] = val
+
+    def _Generation(self, prefix, itemSets, tidSets):
+        """
+        Generates the patterns that satisfy the periodic frequent property.
+
+        :param prefix: the prefix of a pattern
+        :type prefix: list or None
+        :param itemSets: the item sets of the patterns
+        :type itemSets: list
+        :param tidSets: the timestamps of the patterns
+        :type tidSets: list
+        """
+        if len(itemSets) == 1:
+            i = itemSets[0]
+            tidI = tidSets[0]
+            self._save(prefix, [i], tidI)
+            return
+        for i in range(len(itemSets)):
+            itemX = itemSets[i]
+            if itemX is None:
+                continue
+            tidSetX = tidSets[i]
+            classItemSets = []
+            classTidSets = []
+            itemSetX = [itemX]
+            neighboursItemsI = self._getNeighbourItems(itemSets[i])
+            for j in range(i + 1, len(itemSets)):
+                neighboursItemsJ = self._getNeighbourItems(itemSets[j])
+                if not itemSets[j] in neighboursItemsI:
+                    continue
+                itemJ = itemSets[j]
+                tidSetJ = tidSets[j]
+                y = list(set(tidSetX).intersection(tidSetJ))
+                if len(y) >= self._minSup:
+                    ne = list(set(neighboursItemsI).intersection(neighboursItemsJ))
+                    x = []
+                    x = x + [itemX]
+                    x = x + [itemJ]
+                    self._NeighboursMap[tuple(x)] = ne
+                    classItemSets.append(itemJ)
+                    classTidSets.append(y)
+            newPrefix = list(set(itemSetX)) + prefix
+            self._Generation(newPrefix, classItemSets, classTidSets)
+            self._save(prefix, list(set(itemSetX)), tidSetX)
+
+    def _getNeighbourItems(self, keySet):
+        """
+        A function to get the neighbours of an item
+
+        :param keySet: itemSet
+        :type keySet: str or tuple
+        :return: set of common neighbours
+        :rtype: set
+        """
+        itemNeighbours = self._NeighboursMap.keys()
+        if isinstance(keySet, str):
+            if self._NeighboursMap.get(keySet) is None:
+                return []
+            itemNeighbours = list(set(itemNeighbours).intersection(set(self._NeighboursMap.get(keySet))))
+        if isinstance(keySet, tuple):
+            keySet = list(keySet)
+            for j in range(0, len(keySet)):
+                i = keySet[j]
+                itemNeighbours = list(set(itemNeighbours).intersection(set(self._NeighboursMap.get(i))))
+        return itemNeighbours
+
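+# To see what _getSupportAndPeriod computes, take timestamps [1, 4, 6] in a database whose
+# last transaction number (_lno) is 10: the gaps examined are 1-0, 4-1, 6-4 and finally
+# 10-6, so support is 3 and periodicity is 4. A standalone sketch (toy values):
+#
+#             def support_and_period(timestamps, last_ts, max_per):
+#                 timestamps.sort()
+#                 cur = per = sup = 0
+#                 for t in timestamps:
+#                     per = max(per, t - cur)
+#                     if per > max_per:
+#                         return [0, 0]
+#                     cur = t
+#                     sup += 1
+#                 return [sup, max(per, last_ts - cur)]
+#
+#             support_and_period([1, 4, 6], 10, 5)   # -> [3, 4]
+#             support_and_period([1, 4, 6], 10, 2)   # -> [0, 0], the gap of 3 exceeds maxPer
+#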
+[docs] + def mapNeighbours(self): + """ + A function to map items to their Neighbours + """ + self._NeighboursMap = {} + if isinstance(self._nFile, _ab._pd.DataFrame): + data = [] + if self._nFile.empty: + print("its empty..") + i = self._nFile.columns.values.tolist() + if 'Neighbours' in i: + data = self._nFile['Neighbours'].tolist() + for i in data: + self._NeighboursMap[i[0]] = i[1:] + if isinstance(self._nFile, str): + if _ab._validators.url(self._nFile): + data = _ab._urlopen(self._nFile) + for line in data: + line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._NeighboursMap[temp[0]] = temp[1:] + else: + try: + with open(self._nFile, 'r', encoding='utf-8') as f: + for line in f: + line.strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._NeighboursMap[temp[0]] = temp[1:] + except IOError: + print("File Not Found") + quit()
+ + +
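+# The neighbourhood file consumed by mapNeighbours lists, per line, an item followed by its
+# neighbours, using the same separator as the transaction file. Hypothetical lines and the
+# entries they produce in self._NeighboursMap:
+#
+#             "a\tb\tc"   ->  NeighboursMap['a'] == ['b', 'c']
+#             "b\ta"      ->  NeighboursMap['b'] == ['a']
+#             "c\ta"      ->  NeighboursMap['c'] == ['a']
+#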
+[docs] + @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.") + def startMine(self): + """ + Frequent pattern mining process will start from here + """ + + self.mine()
+ + +
+[docs]
+    def mine(self):
+        """
+        Frequent pattern mining process will start from here
+        """
+        self._startTime = _ab._time.time()
+        if self._iFile is None:
+            raise Exception("Please enter the file path or file name:")
+        self._creatingItemSets()
+        self.mapNeighbours()
+        self._finalPatterns = {}
+        plist = self._frequentOneItem()  # also converts minSup and maxPer into counts, so no separate conversion is needed here
+        for i in range(len(plist)):
+            itemX = plist[i]
+            tidSetX = self._tidList[itemX]
+            itemSetX = [itemX]
+            itemSets = []
+            tidSets = []
+            neighboursItems = self._getNeighbourItems(plist[i])
+            for j in range(i + 1, len(plist)):
+                if not plist[j] in neighboursItems:
+                    continue
+                itemJ = plist[j]
+                tidSetJ = self._tidList[itemJ]
+                y1 = list(set(tidSetX).intersection(tidSetJ))
+                if len(y1) >= self._minSup:
+                    itemSets.append(itemJ)
+                    tidSets.append(y1)
+            self._Generation(itemSetX, itemSets, tidSets)
+            self._save(None, itemSetX, tidSetX)
+        self._endTime = _ab._time.time()
+        process = _ab._psutil.Process(_ab._os.getpid())
+        self._memoryUSS = process.memory_full_info().uss
+        self._memoryRSS = process.memory_info().rss
+        print("Geo-referenced periodic-frequent patterns were generated successfully using the GPFPMiner algorithm")
+ + +
+[docs] + def getMemoryUSS(self): + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self): + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self): + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self): + """ + Storing final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataFrame = {} + data = [] + for a, b in self._finalPatterns.items(): + pat = "" + for i in a: + pat += str(i) + "\t" + data.append([pat, b[0], b[1]]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support', 'Period']) + return dataFrame
+ + +
+[docs] + def save(self, outFile): + """ + Complete set of frequent patterns will be loaded in to a output file + + :param outFile: name of the output file + :type outFile: csv file + """ + self._oFile = outFile + writer = open(self._oFile, 'w+') + for x, y in self._finalPatterns.items(): + pat = "" + for i in x: + pat += str(i) + "\t" + patternsAndSupport = pat + ": " + str(y[0]) + ": " + str(y[1]) + writer.write("%s \n" % patternsAndSupport)
+ + +
+[docs] + def getPatterns(self): + """ + Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs] + def printResults(self): + """ + This function is used to print the results + """ + print("Total number of Spatial Periodic-Frequent Patterns:", len(self.getPatterns())) + print("Total Memory in USS:", self.getMemoryUSS()) + print("Total Memory in RSS", self.getMemoryRSS()) + print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_ab._sys.argv) == 6 or len(_ab._sys.argv) == 7:
+        if len(_ab._sys.argv) == 7:
+            _ap = GPFPMiner(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5], _ab._sys.argv[6])
+        if len(_ab._sys.argv) == 6:
+            _ap = GPFPMiner(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5])
+        _ap.mine()
+        print("Total number of Spatial Periodic-Frequent Patterns:", len(_ap.getPatterns()))
+        _ap.save(_ab._sys.argv[2])
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        print("Error! The number of input parameters does not match the total number of parameters provided")
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/georeferencedFrequentPattern/basic/SpatialECLAT.html b/sphinx/_build/html/_modules/PAMI/georeferencedFrequentPattern/basic/SpatialECLAT.html new file mode 100644 index 000000000..5ecf95cf6 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/georeferencedFrequentPattern/basic/SpatialECLAT.html @@ -0,0 +1,687 @@ + + + + + + PAMI.georeferencedFrequentPattern.basic.SpatialECLAT — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for PAMI.georeferencedFrequentPattern.basic.SpatialECLAT

+#  SpatialECLAT is an extension of the ECLAT algorithm, which stands for Equivalence Class Clustering and bottom-up
+#  Lattice Traversal. ECLAT is one of the popular methods of association rule mining, and a more efficient and
+#  scalable version of the Apriori algorithm.
+#
+#  **Importing this algorithm into a python program**
+#  ---------------------------------------------------
+#
+#             from PAMI.georeferencedFrequentPattern.basic import SpatialECLAT as alg
+#
+#             obj = alg.SpatialECLAT("sampleTDB.txt", "sampleN.txt", 5)
+#
+#             obj.mine()
+#
+#             spatialFrequentPatterns = obj.getPatterns()
+#
+#             print("Total number of Spatial Frequent Patterns:", len(spatialFrequentPatterns))
+#
+#             obj.save("outFile")
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+from PAMI.georeferencedFrequentPattern.basic import abstract as _ab
+from deprecated import deprecated
+
+
+
+[docs] +class SpatialECLAT(_ab._spatialFrequentPatterns): + """ + :Description: SpatialECLAT is an extension of the ECLAT algorithm, which stands for Equivalence Class Clustering and bottom-up + Lattice Traversal. It is one of the popular methods of association rule mining, and a more efficient and + scalable version of the Apriori algorithm. + + :Reference: Rage, Uday & Fournier Viger, Philippe & Zettsu, Koji & Toyoda, Masashi & Kitsuregawa, Masaru. (2020). + Discovering Frequent Spatial Patterns in Very Large Spatiotemporal Databases. + + :param iFile: str : + Name of the Input file to mine the complete set of Geo-referenced frequent patterns + :param oFile: str : + Name of the output file to store the complete set of Geo-referenced frequent patterns + :param minSup: int or float or str : + The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup as expressed in count. Otherwise, it will be treated as float. + :param nFile: str : + Name of the neighbourhood input file to mine the complete set of Geo-referenced frequent patterns + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator. + + + :Attributes: + + iFile : str + Input file name or path of the input file + nFile : str + Name of the neighbourhood file + minSup : int or float or str + The user can specify minSup either in count or proportion of database size. + If the program detects the data type of minSup is integer, then it treats minSup as expressed in count. + Otherwise, it will be treated as float. + Example: minSup=10 will be treated as integer, while minSup=10.0 will be treated as float + startTime : float + To record the start time of the mining process + endTime : float + To record the completion time of the mining process + finalPatterns : dict + Storing the complete set of patterns in a dictionary variable + oFile : str + Name of the output file to store the complete set of frequent patterns + memoryUSS : float + To store the total amount of USS memory consumed by the program + memoryRSS : float + To store the total amount of RSS memory consumed by the program + Database : list + To store the complete set of transactions available in the input database/file + + :Methods: + + mine() + Mining process will start from here + getPatterns() + Complete set of patterns will be retrieved with this function + save(oFile) + Complete set of frequent patterns will be written to an output file + getPatternsAsDataFrame() + Complete set of frequent patterns will be loaded into a dataframe + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function + creatingItemSets(iFileName) + Storing the complete transactions of the database/input file in a database variable + frequentOneItem() + Generating frequent patterns of length one + dictKeysToInt(iList) + Converting dictionary keys to integer elements + eclatGeneration(cList) + It will generate the combinations of frequent items + generateSpatialFrequentPatterns(tidList) + It will generate the combinations of frequent items from a list of items + convert(value) + To convert the given user specified value + getNeighbourItems(keySet) + A function to get common neighbours of an itemSet + mapNeighbours(file) + A function to map items to their neighbours + + **Executing the code on terminal :** + ---------------------------------------- + + .. code-block:: console + + Format: + + (.venv) $ python3 SpatialECLAT.py <inputFile> <outputFile> <neighbourFile> <minSup> + + Example Usage: + + (.venv) $ python3 SpatialECLAT.py sampleTDB.txt output.txt sampleN.txt 0.5 + + .. note:: minSup will be considered in percentage of database transactions + + + + **Sample run of importing the code :** + ------------------------------------------ + .. code-block:: python + + from PAMI.georeferencedFrequentPattern.basic import SpatialECLAT as alg + + obj = alg.SpatialECLAT("sampleTDB.txt", "sampleN.txt", 5) + + obj.mine() + + spatialFrequentPatterns = obj.getPatterns() + + print("Total number of Spatial Frequent Patterns:", len(spatialFrequentPatterns)) + + obj.save("outFile") + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + + **Credits:** + ---------------- + The complete program was written by B. Sai Chitra under the supervision of Professor Rage Uday Kiran.
+ """ + + _minSup = float() + _startTime = float() + _endTime = float() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _nFile = " " + _memoryUSS = float() + _memoryRSS = float() + _Database = [] + _sep = "\t" + + def __init__(self, iFile, nFile, minSup, sep="\t"): + super().__init__(iFile, nFile, minSup, sep) + self._NeighboursMap = {} + + def _creatingItemSets(self): + """ + Storing the complete transactions of the database/input file in a database variable + """ + self._Database = [] + if isinstance(self._iFile, _ab._pd.DataFrame): + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'Transactions' in i: + self._Database = self._iFile['Transactions'].tolist() + if 'Patterns' in i: + self._Database = self._iFile['Patterns'].tolist() + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line.strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + except IOError: + print("File Not Found") + quit() + + # function to get frequent one pattern + def _frequentOneItem(self): + """ + Generating one frequent patterns + """ + self._finalPatterns = {} + candidate = {} + for i in range(len(self._Database)): + for j in range(len(self._Database[i])): + if self._Database[i][j] not in candidate: + candidate[self._Database[i][j]] = [i] + else: + candidate[self._Database[i][j]] += [i] + self._finalPatterns = {keys: value for keys, value in candidate.items() if len(value) >= self._minSup} + + def _convert(self, value): + """ + To convert the given user specified value + + :param value: user specified value + :type value: int or float or str + :return: converted value + :rtype: float + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self._Database) * value) + if type(value) is str: + if '.' 
in value: + value = float(value) + value = (len(self._Database) * value) + else: + value = int(value) + return value + + @staticmethod + def _dictKeysToInt(iList): + """ + Converting dictionary keys to integer elements + + :param iList: Dictionary with patterns as keys and their support count as a value + :type iList: dict + :returns: list of integer patterns to represent dictionary keys + :rtype: list + """ + + temp = [] + for ite in iList.keys(): + ite = [int(i) for i in ite.strip('[]').split('\t')] + temp.append(ite) + # print(sorted(temp)) + return sorted(temp) + + def _eclatGeneration(self, cList): + """It will generate the combinations of frequent items + + :param cList :it represents the items with their respective transaction identifiers + :type cList: dictionary + :return: returning transaction dictionary + :rtype: dict + """ + # to generate all + tidList = {} + key = list(cList.keys()) + for i in range(0, len(key)): + NeighboursItems = self._getNeighbourItems(key[i]) + for j in range(i + 1, len(key)): + # print(c[key[i]],c[key[j]]) + if not key[j] in NeighboursItems: + continue + intersectionList = list(set(cList[key[i]]).intersection(set(cList[key[j]]))) + itemList = [] + itemList += key[i] + itemList += key[j] + if len(intersectionList) >= self._minSup: + itemList.sort() + if tuple(itemList) not in tidList: + tidList[tuple(set(itemList))] = intersectionList + return tidList + + def _generateSpatialFrequentPatterns(self, tidList): + """ + It will generate the combinations of frequent items from a list of items + + :param tidList: it represents the items with their respective transaction identifiers + :type tidList: dictionary + :return: returning transaction dictionary + :rtype: dict + """ + tidList1 = {} + if len(tidList) == 0: + print("There are no more candidate sets") + else: + key = list(tidList.keys()) + for i in range(0, len(key)): + NeighboursItems = self._getNeighbourItems(key[i]) + for j in range(i + 1, len(key)): + if not key[j] in NeighboursItems: + continue + intersectionList = list(set(tidList[key[i]]).intersection(set(tidList[key[j]]))) + itemList = [] + if len(intersectionList) >= self._minSup: + itemList += key[i], key[j] + itemList.sort() + tidList1[tuple(itemList)] = intersectionList + + return tidList1 + + def _getNeighbourItems(self, keySet): + """ + A function to get Neighbours of a item + + :param keySet: itemSet + :type keySet: str or tuple + :return: set of common neighbours + :rtype: set + """ + itemNeighbours = self._NeighboursMap.keys() + if isinstance(keySet, str): + if self._NeighboursMap.get(keySet) is None: + return [] + itemNeighbours = list(set(itemNeighbours).intersection(set(self._NeighboursMap.get(keySet)))) + if isinstance(keySet, tuple): + keySet = list(keySet) + # print(keySet) + for j in range(0, len(keySet)): + i = keySet[j] + itemNeighbours = list(set(itemNeighbours).intersection(set(self._NeighboursMap.get(i)))) + return itemNeighbours + + def _mapNeighbours(self): + """ + A function to map items to their Neighbours + """ + self._NeighboursMap = {} + if isinstance(self._nFile, _ab._pd.DataFrame): + data, items = [], [] + if self._nFile.empty: + print("its empty..") + i = self._nFile.columns.values.tolist() + if 'item' in i: + items = self._nFile['items'].tolist() + if 'Neighbours' in i: + data = self._nFile['Neighbours'].tolist() + for k in range(len(items)): + self._NeighboursMap[items[k]] = data[k] + # print(self.Database) + if isinstance(self._nFile, str): + if _ab._validators.url(self._nFile): + data = _ab._urlopen(self._nFile) + 
for line in data: + line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._NeighboursMap[temp[0]] = temp[1:] + else: + try: + with open(self._nFile, 'r', encoding='utf-8') as f: + for line in f: + line.strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._NeighboursMap[temp[0]] = temp[1:] + except IOError: + print("File Not Found") + quit() + +
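+# A hedged illustration (an editorial addition): _mapNeighbours above expects each line of the
+# neighbourhood file to begin with an item followed by its spatial neighbours, separated by the
+# chosen separator (tab by default). A hypothetical sampleN.txt with the three lines
+#
+#             a	b	c
+#             b	a
+#             c	a
+#
+# is parsed into {'a': ['b', 'c'], 'b': ['a'], 'c': ['a']}.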
+[docs] + @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.") + def startMine(self): + """ + Frequent pattern mining process will start from here + """ + + self.mine()
+ + +
+[docs] + def mine(self): + """ + Frequent pattern mining process will start from here + """ + + # global items_sets, endTime, startTime + self._startTime = _ab._time.time() + if self._iFile is None: + raise Exception("Please enter the file path or file name:") + self._creatingItemSets() + self._minSup = self._convert(self._minSup) + self._mapNeighbours() + self._finalPatterns = {} + self._frequentOneItem() + frequentSet = self._generateSpatialFrequentPatterns(self._finalPatterns) + for x, y in frequentSet.items(): + if x not in self._finalPatterns: + self._finalPatterns[x] = y + while 1: + frequentSet = self._eclatGeneration(frequentSet) + for x, y in frequentSet.items(): + if x not in self._finalPatterns: + self._finalPatterns[x] = y + if len(frequentSet) == 0: + break + self._endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = float() + self._memoryRSS = float() + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss + print("Spatial Frequent patterns were generated successfully using SpatialECLAT algorithm")
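+# A minimal sketch (an editorial addition, mirroring _eclatGeneration above): ECLAT joins two
+# itemsets by intersecting their transaction-id lists, and the size of the intersection is the
+# support of the joined itemset; the tid-lists and minSup value here are hypothetical.
+#
+#             tidList = {('a',): [1, 2, 3, 5], ('b',): [2, 3, 4]}
+#             joined = list(set(tidList[('a',)]).intersection(tidList[('b',)]))   # [2, 3]
+#             if len(joined) >= 2:                          # hypothetical minSup = 2
+#                 tidList[('a', 'b')] = joined              # ('a', 'b') is frequent with support 2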
+ + +
+[docs] + def getMemoryUSS(self): + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self): + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self): + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self): + """ + Storing the final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + data = [] + for a, b in self._finalPatterns.items(): + pat = str() + if isinstance(a, str): + pat = a + if isinstance(a, (list, tuple)): + for i in a: + pat = pat + str(i) + ' ' + data.append([pat.strip(), len(b)]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support']) + return dataFrame
+ + +
+[docs] + def save(self, outFile): + """ + Complete set of frequent patterns will be written to an output file + + :param outFile: name of the output file + :type outFile: csv file + """ + self._oFile = outFile + with open(self._oFile, 'w+') as writer: + for x, y in self._finalPatterns.items(): + pat = str() + if isinstance(x, str): + pat = x + if isinstance(x, (list, tuple)): + for i in x: + pat = pat + str(i) + '\t' + patternsAndSupport = pat.strip() + ":" + str(len(y)) + writer.write("%s \n" % patternsAndSupport)
+ + +
+[docs] + def getPatterns(self): + """ + Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs] + def printResults(self): + """ + This function is used to print the results + """ + print("Total number of Spatial Frequent Patterns:", len(self.getPatterns())) + print("Total Memory in USS:", self.getMemoryUSS()) + print("Total Memory in RSS:", self.getMemoryRSS()) + print("Total ExecutionTime in seconds:", self.getRuntime())
+
+ + + +if __name__ == "__main__": + _ap = str() + if len(_ab._sys.argv) == 5 or len(_ab._sys.argv) == 6: + if len(_ab._sys.argv) == 6: + _ap = SpatialECLAT(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5]) + if len(_ab._sys.argv) == 5: + _ap = SpatialECLAT(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4]) + _ap.mine() + print("Total number of Spatial Frequent Patterns:", len(_ap.getPatterns())) + _ap.save(_ab._sys.argv[2]) + print("Total Memory in USS:", _ap.getMemoryUSS()) + print("Total Memory in RSS:", _ap.getMemoryRSS()) + print("Total ExecutionTime in seconds:", _ap.getRuntime()) + else: + print("Error! The number of input parameters does not match the required number of parameters") +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/georeferencedPartialPeriodicPattern/basic/STEclat.html b/sphinx/_build/html/_modules/PAMI/georeferencedPartialPeriodicPattern/basic/STEclat.html new file mode 100644 index 000000000..22c80d8e0 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/georeferencedPartialPeriodicPattern/basic/STEclat.html @@ -0,0 +1,709 @@ + + + + + + PAMI.georeferencedPartialPeriodicPattern.basic.STEclat — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for PAMI.georeferencedPartialPeriodicPattern.basic.STEclat

+# STEclat is one of the fundamental algorithms to discover geo-referenced partial periodic-frequent patterns in a transactional database.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#             from PAMI.georeferencedPartialPeriodicPattern.basic import STEclat as alg
+#
+#             obj = alg.STEclat("sampleTDB.txt", "sampleN.txt", 3, 4)
+#
+#             obj.mine()
+#
+#             partialPeriodicSpatialPatterns = obj.getPatterns()
+#
+#             print("Total number of Periodic Spatial Frequent Patterns:", len(partialPeriodicSpatialPatterns))
+#
+#             obj.save("outFile")
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+
+from PAMI.georeferencedPartialPeriodicPattern.basic import abstract as _ab
+from deprecated import deprecated
+
+
+
+[docs] +class STEclat(_ab._partialPeriodicSpatialPatterns): + """ + :Description: STEclat is one of the fundamental algorithms to discover geo-referenced partial periodic-frequent patterns in a transactional database. + + :Reference: R. Uday Kiran, C. Saideep, K. Zettsu, M. Toyoda, M. Kitsuregawa and P. Krishna Reddy, + "Discovering Partial Periodic Spatial Patterns in Spatiotemporal Databases," 2019 IEEE International + Conference on Big Data (Big Data), 2019, pp. 233-238, doi: 10.1109/BigData47090.2019.9005693. + + :param iFile: str : + Name of the Input file to mine the complete set of Geo-referenced Partial Periodic patterns + :param oFile: str : + Name of the output file to store the complete set of Geo-referenced Partial Periodic patterns + :param minPS: int or float or str : + The user can specify minPS either in count or proportion of database size. If the program detects the data type of minPS is integer, then it treats minPS as expressed in count. Otherwise, it will be treated as float. + :param maxIAT: int or float or str : + The user can specify maxIAT either in count or proportion of database size. If the program detects the data type of maxIAT is integer, then it treats maxIAT as expressed in count. Otherwise, it will be treated as float. + :param nFile: str : + Name of the neighbourhood input file to mine the complete set of Geo-referenced Partial Periodic patterns + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator. + + :Attributes: + + iFile : str + Input file name or path of the input file + nFile : str + Name of the neighbourhood file + maxIAT : float or int or str + The user can specify maxIAT either in count or proportion of database size. + If the program detects the data type of maxIAT is integer, then it treats maxIAT as expressed in count. + Otherwise, it will be treated as float. + Example: maxIAT=10 will be treated as integer, while maxIAT=10.0 will be treated as float + minPS : float or int or str + The user can specify minPS either in count or proportion of database size. + If the program detects the data type of minPS is integer, then it treats minPS as expressed in count. + Otherwise, it will be treated as float. + Example: minPS=10 will be treated as integer, while minPS=10.0 will be treated as float + sep : str + This variable is used to distinguish items from one another in a transaction. The default separator is tab space or \t. + However, the users can override the default separator.
+ startTime : float + To record the start time of the mining process + endTime : float + To record the completion time of the mining process + finalPatterns : dict + Storing the complete set of patterns in a dictionary variable + oFile : str + Name of the output file to store the complete set of frequent patterns + memoryUSS : float + To store the total amount of USS memory consumed by the program + memoryRSS : float + To store the total amount of RSS memory consumed by the program + Database : list + To store the complete set of transactions available in the input database/file + + :Methods: + + mine() + Mining process will start from here + getPatterns() + Complete set of patterns will be retrieved with this function + save(oFile) + Complete set of frequent patterns will be written to an output file + getPatternsAsDataFrame() + Complete set of frequent patterns will be loaded into a dataframe + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function + creatingItemSets(iFileName) + Storing the complete transactions of the database/input file in a database variable + frequentOneItem() + Generating frequent patterns of length one + convert(value) + To convert the given user specified value + getNeighbourItems(keySet) + A function to get common neighbours of an itemSet + mapNeighbours(file) + A function to map items to their neighbours + + **Executing the code on terminal :** + ---------------------------------------- + + .. code-block:: console + + Format: + + (.venv) $ python3 STEclat.py <inputFile> <outputFile> <neighbourFile> <minPS> <maxIAT> + + Example Usage: + + (.venv) $ python3 STEclat.py sampleTDB.txt output.txt sampleN.txt 0.2 0.5 + + .. note:: maxIAT & minPS will be considered in percentage of database transactions + + + **Sample run of importing the code :** + -------------------------------------- + .. code-block:: python + + from PAMI.georeferencedPartialPeriodicPattern.basic import STEclat as alg + + obj = alg.STEclat("sampleTDB.txt", "sampleN.txt", 3, 4) + + obj.mine() + + partialPeriodicSpatialPatterns = obj.getPatterns() + + print("Total number of Periodic Spatial Frequent Patterns:", len(partialPeriodicSpatialPatterns)) + + obj.save("outFile") + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + **Credits:** + ------------- + The complete program was written by P. Likhitha under the supervision of Professor Rage Uday Kiran.
+ """ + + _maxIAT = " " + _minPS = " " + _startTime = float() + _endTime = float() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _nFile = " " + _memoryUSS = float() + _memoryRSS = float() + _Database = [] + _sep = "\t" + _lno = 0 + + def __init__(self, iFile, nFile, minPS, maxIAT, sep="\t"): + super().__init__(iFile, nFile, minPS, maxIAT, sep) + self._NeighboursMap = {} + + def _creatingItemSets(self): + """ + Storing the complete transactions of the database/input file in a database variable + """ + self._Database = [] + if isinstance(self._iFile, _ab._pd.DataFrame): + data, ts = [], [] + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'TS' in i: + ts = self._iFile['TS'].tolist() + if 'Transactions' in i: + data = self._iFile['Transactions'].tolist() + for i in range(len(data)): + tr = [ts[i][0]] + tr = tr + data[i] + self._Database.append(tr) + + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line.strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + except IOError: + print("File Not Found") + quit() + + # function to get frequent one pattern + def _frequentOneItem(self): + """ + Generating one frequent patterns + """ + self._tidList = {} + self._mapSupport = {} + self._maxIAT = self._convert(self._maxIAT) + for line in self._Database: + s = line + n = int(s[0]) + for i in range(1, len(s)): + si = s[i] + if self._mapSupport.get(si) is None: + self._mapSupport[si] = [0, n] + self._tidList[si] = [n] + else: + lp = n - self._mapSupport[si][1] + if lp <= self._maxIAT: + self._mapSupport[si][0] += 1 + self._mapSupport[si][1] = n + self._tidList[si].append(n) + self._minPS = self._convert(self._minPS) + self._mapSupport = {k: v[0] for k, v in self._mapSupport.items() if v[0] >= self._minPS} + plist = [key for key, value in sorted(self._mapSupport.items(), key=lambda x: x[1], reverse=True)] + return plist + + def _convert(self, value): + """ + To convert the given user specified value + + :param value: user specified value + + :type value: int or float or str + + :return: converted value + + :rtype: float + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self._Database) * value) + if type(value) is str: + if '.' in value: + value = float(value) + value = (len(self._Database) * value) + else: + value = int(value) + return value + + def _getPeriodicSupport(self, timeStamps): + """ + calculates the support and periodicity with list of timestamps + + :param timeStamps: timestamps of a pattern + :type timeStamps: list + """ + timeStamps.sort() + per = 0 + for i in range(len(timeStamps) - 1): + j = i + 1 + if abs(timeStamps[j] - timeStamps[i]) <= self._maxIAT: + per += 1 + return per + + def _save(self, prefix, suffix, tidSetX): + """ + Saves the patterns that satisfy the periodic frequent property. 
+ + :param prefix: the prefix of a pattern + :type prefix: list or None + :param suffix: the suffix of a patterns + :type suffix: list + :param tidSetX: the timestamp of a patterns + :type tidSetX: list + + """ + if prefix is None: + prefix = suffix + else: + prefix = prefix + suffix + val = self._getPeriodicSupport(tidSetX) + if val >= self._minPS: + self._finalPatterns[tuple(prefix)] = val + + def _Generation(self, prefix, itemSets, tidSets): + """ + Generates the patterns that satisfy the periodic frequent property. + + :param prefix: the prefix of a pattern + :type prefix: list or None + :param itemSets: the item sets of a patterns + :type itemSets: list + :param tidSets: the timestamp of a patterns + :type tidSets: list + """ + if len(itemSets) == 1: + i = itemSets[0] + tidi = tidSets[0] + self._save(prefix, [i], tidi) + return + for i in range(len(itemSets)): + itemI = itemSets[i] + if itemI is None: + continue + tidSetX = tidSets[i] + classItemSets = [] + classTidSets = [] + itemSetX = [itemI] + for j in range(i + 1, len(itemSets)): + itemJ = itemSets[j] + tidSetJ = tidSets[j] + y = list(set(tidSetX).intersection(tidSetJ)) + val = self._getPeriodicSupport(y) + if val >= self._minPS: + classItemSets.append(itemJ) + classTidSets.append(y) + newprefix = list(set(itemSetX)) + prefix + self._Generation(newprefix, classItemSets, classTidSets) + self._save(prefix, list(set(itemSetX)), tidSetX) + + def _getNeighbourItems(self, keySet): + """ + A function to get Neighbours of an item + + :param keySet: itemSet + :type keySet: str or tuple + :return: set of common neighbours + :rtype: set + """ + itemNeighbours = self._NeighboursMap.keys() + if isinstance(keySet, str): + if self._NeighboursMap.get(keySet) is None: + return [] + itemNeighbours = list(set(itemNeighbours).intersection(set(self._NeighboursMap.get(keySet)))) + if isinstance(keySet, tuple): + keySet = list(keySet) + for j in range(0, len(keySet)): + i = keySet[j] + itemNeighbours = list(set(itemNeighbours).intersection(set(self._NeighboursMap.get(i)))) + return itemNeighbours + +
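+# A hedged worked example (an editorial addition): _getPeriodicSupport above counts adjacent
+# timestamp pairs whose gap is at most maxIAT. With hypothetical timestamps [1, 2, 5, 6, 9] and
+# maxIAT = 2, the sorted gaps are 1, 3, 1, 3, so only two pairs qualify:
+#
+#             timeStamps = [1, 2, 5, 6, 9]
+#             per = sum(1 for i in range(len(timeStamps) - 1)
+#                       if timeStamps[i + 1] - timeStamps[i] <= 2)   # per == 2
+#
+# The pattern is retained only if this periodic-support is at least minPS.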
+[docs] + def mapNeighbours(self): + """ + A function to map items to their Neighbours + """ + self._NeighboursMap = {} + if isinstance(self._nFile, _ab._pd.DataFrame): + data = [] + if self._nFile.empty: + print("Input dataframe is empty") + i = self._nFile.columns.values.tolist() + if 'Neighbours' in i: + data = self._nFile['Neighbours'].tolist() + for row in data: + self._NeighboursMap[row[0]] = row[1:] + if isinstance(self._nFile, str): + if _ab._validators.url(self._nFile): + data = _ab._urlopen(self._nFile) + for line in data: + line = line.decode("utf-8").strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._NeighboursMap[temp[0]] = temp[1:] + else: + try: + with open(self._nFile, 'r', encoding='utf-8') as f: + for line in f: + line = line.strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._NeighboursMap[temp[0]] = temp[1:] + except IOError: + print("File Not Found") + quit()
+ + +
+[docs] + @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.") + def startMine(self): + """ + Frequent pattern mining process will start from here + """ + + self.mine()
+ + +
+[docs] + def mine(self): + """ + Frequent pattern mining process will start from here + """ + + self._startTime = _ab._time.time() + if self._iFile is None: + raise Exception("Please enter the file path or file name:") + self._creatingItemSets() + self.mapNeighbours() + self._finalPatterns = {} + plist = self._frequentOneItem() + for i in range(len(plist)): + itemX = plist[i] + tidSetX = self._tidList[itemX] + itemSetX = [itemX] + itemSets = [] + tidSets = [] + neighboursItems = self._getNeighbourItems(plist[i]) + for j in range(i + 1, len(plist)): + if plist[j] not in neighboursItems: + continue + itemJ = plist[j] + tidSetJ = self._tidList[itemJ] + y1 = list(set(tidSetX).intersection(tidSetJ)) + val = self._getPeriodicSupport(y1) + if val >= self._minPS: + itemSets.append(itemJ) + tidSets.append(y1) + self._Generation(itemSetX, itemSets, tidSets) + self._save(None, itemSetX, tidSetX) + self._endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = float() + self._memoryRSS = float() + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss + print("Spatial Periodic Frequent patterns were generated successfully using STEclat algorithm")
+ + +
+[docs] + def getMemoryUSS(self): + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self): + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self): + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self): + """ + Storing the final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + data = [] + for a, b in self._finalPatterns.items(): + pat = "" + for i in a: + pat += str(i) + ' ' + data.append([pat.strip(), b]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'periodicSupport']) + return dataFrame
+ + +
+[docs] + def save(self, outFile): + """ + Complete set of frequent patterns will be written to an output file + + :param outFile: name of the output file + :type outFile: csv file + """ + self._oFile = outFile + with open(self._oFile, 'w+') as writer: + for x, y in self._finalPatterns.items(): + pat = "" + for i in x: + pat += str(i) + '\t' + patternsAndSupport = pat.strip() + ": " + str(y) + writer.write("%s \n" % patternsAndSupport)
+ + +
+[docs] + def getPatterns(self): + """ + Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs] + def printResults(self): + """ + This function is used to print the results + """ + print("Total number of Spatial Partial Periodic Patterns:", len(self.getPatterns())) + print("Total Memory in USS:", self.getMemoryUSS()) + print("Total Memory in RSS:", self.getMemoryRSS()) + print("Total ExecutionTime in seconds:", self.getRuntime())
+
+ + + +if __name__ == "__main__": + _ap = str() + if len(_ab._sys.argv) == 6 or len(_ab._sys.argv) == 7: + if len(_ab._sys.argv) == 7: + _ap = STEclat(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5], _ab._sys.argv[6]) + if len(_ab._sys.argv) == 6: + _ap = STEclat(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5]) + _ap.mine() + print("Total number of Spatial Partial Periodic Patterns:", len(_ap.getPatterns())) + _ap.save(_ab._sys.argv[2]) + print("Total Memory in USS:", _ap.getMemoryUSS()) + print("Total Memory in RSS:", _ap.getMemoryRSS()) + print("Total ExecutionTime in seconds:", _ap.getRuntime()) + else: + print("Error! The number of input parameters does not match the required number of parameters") + + + +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/highUtilityFrequentPattern/basic/HUFIM.html b/sphinx/_build/html/_modules/PAMI/highUtilityFrequentPattern/basic/HUFIM.html new file mode 100644 index 000000000..35339b3be --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/highUtilityFrequentPattern/basic/HUFIM.html @@ -0,0 +1,1088 @@ + + + + + + PAMI.highUtilityFrequentPattern.basic.HUFIM — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for PAMI.highUtilityFrequentPattern.basic.HUFIM

+# HUFIM (High Utility Frequent Itemset Miner) algorithm helps us to mine High Utility Frequent ItemSets (HUFIs) from transactional databases.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#
+#             from PAMI.highUtilityFrequentPattern.basic import HUFIM as alg
+#
+#             obj = alg.HUFIM("input.txt", 35, 20)
+#
+#             obj.mine()
+#
+#             Patterns = obj.getPatterns()
+#
+#             print("Total number of high utility frequent Patterns:", len(Patterns))
+#
+#             obj.save("output")
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+
+from PAMI.highUtilityFrequentPattern.basic import abstract as _ab
+from typing import List, Dict, Union
+from deprecated import deprecated
+
+
+class _Transaction:
+    """
+    A class to store a transaction of a database
+
+    :Attributes:
+
+        items: list
+            A list of items in transaction 
+        utilities: list
+            A list of utilities of items in transaction
+        transactionUtility: int
+            represents the total sum of all utilities in the transaction
+        prefixUtility:
+            the prefix utility value of the transaction
+        offset:
+            an offset pointer, used by projected transactions
+        support:
+            maintains the support of the transaction
+
+    :Methods:
+
+        projectedTransaction(offsetE)
+            A method to create a new Transaction from an existing one, starting from offsetE until the end
+        getItems()
+            return items in transaction
+        getUtilities()
+            return utilities in transaction
+        getLastPosition()
+            return last position in a transaction
+        removeUnpromisingItems()
+            A method to remove items whose utility values are low compared with minUtil
+        insertionSort()
+            A method to sort all items in the transaction
+        getSupport()
+            returns the support of the transaction
+    """
+    offset = 0
+    prefixUtility = 0
+    support = 1
+
+    def __init__(self, items: List[int], utilities: List[int], transactionUtility: int) -> None:
+        self.items = items
+        self.utilities = utilities
+        self.transactionUtility = transactionUtility
+        self.support = 1
+
+    def projectTransaction(self, offsetE: int) -> '_Transaction':
+        """
+        A method to create new Transaction from existing transaction starting from offsetE until the end
+
+        :param offsetE: an offset over the original transaction for projecting the transaction
+        :type offsetE: int
+        :return: a new transaction starting from offsetE until the end of the transaction
+        :rtype: _Transaction
+        """
+        new_transaction = _Transaction(self.items, self.utilities, self.transactionUtility)
+        utilityE = self.utilities[offsetE]
+        new_transaction.prefixUtility = self.prefixUtility + utilityE
+        new_transaction.transactionUtility = self.transactionUtility - utilityE
+        new_transaction.support = self.support
+        for i in range(self.offset, offsetE):
+            new_transaction.transactionUtility -= self.utilities[i]
+        new_transaction.offset = offsetE + 1
+        return new_transaction
+
+    def getItems(self) -> List[int]:
+        """
+        A method to return items in transaction
+
+        :return: the list of items in the transaction
+        :rtype: list
+        """
+        return self.items
+
+    def getUtilities(self) -> List[int]:
+        """
+        A method to return utilities in transaction
+
+        :return: the list of utilities in the transaction
+        :rtype: list
+        """
+        return self.utilities
+
+    def getLastPosition(self) -> int:
+        """
+        A method to return last position in a transaction
+        :return: the last position in a transaction
+        :rtype: int
+        """
+
+        return len(self.items) - 1
+
+    def getSupport(self) -> int:
+        """
+        A method to return support in a transaction
+
+        :return: the support in a transaction
+        :rtype: int
+        """
+
+        return self.support
+
+    def removeUnpromisingItems(self, oldNamesToNewNames: Dict[int, int]) -> None:
+        """
+        A method to remove items which are not present in the map passed to the function
+
+        :param oldNamesToNewNames: A map represent old names to new names
+        :type oldNamesToNewNames: map
+        :return: None
+        """
+        tempItems = []
+        tempUtilities = []
+        for idx, item in enumerate(self.items):
+            if item in oldNamesToNewNames:
+                tempItems.append(oldNamesToNewNames[item])
+                tempUtilities.append(self.utilities[idx])
+            else:
+                self.transactionUtility -= self.utilities[idx]
+        self.items = tempItems
+        self.utilities = tempUtilities
+        self.insertionSort()
+
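+    # A hedged worked example (an editorial addition): with oldNamesToNewNames = {7: 1, 3: 2} and a
+    # transaction holding items [3, 5, 7], utilities [2, 4, 6] and transactionUtility 12, item 5 is
+    # unpromising and dropped, its utility is subtracted (12 - 4 = 8), the survivors are renamed to
+    # [2, 1], and insertionSort() below reorders them to items [1, 2] with utilities [6, 2].
+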
+    def insertionSort(self) -> None:
+        """
+        A method to sort the items of the transaction in ascending order (insertion sort)
+
+        :return: None
+        """
+        for i in range(1, len(self.items)):
+            key = self.items[i]
+            utilityJ = self.utilities[i]
+            j = i - 1
+            while j >= 0 and key < self.items[j]:
+                self.items[j + 1] = self.items[j]
+                self.utilities[j + 1] = self.utilities[j]
+                j -= 1
+            self.items[j + 1] = key
+            self.utilities[j + 1] = utilityJ
+        
+
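+# A hedged worked example (an editorial addition): projecting a transaction with items [1, 2, 3],
+# utilities [4, 5, 6] and transactionUtility 15 on offsetE = 1 (item 2) yields
+# prefixUtility = 0 + 5, a remaining transactionUtility of 15 - 5 - 4 = 6 (the suffix [3] alone),
+# and the offset pointer advanced to 2 so the projected view starts after item 2.
+#
+#     t = _Transaction([1, 2, 3], [4, 5, 6], 15)
+#     p = t.projectTransaction(1)
+#     assert (p.prefixUtility, p.transactionUtility, p.offset) == (5, 6, 2)
+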
+class _Dataset:
+    """
+    A class representing the list of transactions in this dataset
+
+    :Attributes:
+
+        transactions :
+            the list of transactions in this dataset
+        maxItem:
+            the largest item name
+        
+    :methods:
+
+        createTransaction(line)
+            Create a transaction object from a line from the input file
+        getMaxItem()
+            return Maximum Item
+        getTransactions()
+            return transactions in database
+
+    """
+    transactions = []
+    maxItem = 0
+    
+    def __init__(self, datasetPath: Union[str, _ab._pd.DataFrame], sep: str) -> None:
+        self.strToInt = {}
+        self.intToStr = {}
+        self.cnt = 1
+        self.sep = sep
+        self.createItemSets(datasetPath)
+
+    def createItemSets(self, datasetPath: Union[str, _ab._pd.DataFrame]) -> None:
+        """
+        Storing the complete transactions of the database/input file in a database variable
+
+        :param datasetPath: path to the input file, a URL, or a pandas DataFrame holding the transactions
+        :type datasetPath: str or pd.DataFrame
+        :return: None
+
+        """
+        self.Database = []
+        self.transactions = []
+        if isinstance(datasetPath, _ab._pd.DataFrame):
+            utilities, data, utilitySum = [], [], []
+            if datasetPath.empty:
+                print("its empty..")
+            i = datasetPath.columns.values.tolist()
+            if 'Transactions' in i:
+                data = datasetPath['Transactions'].tolist()
+            if 'Utilities' in i:
+                utilities = datasetPath['Utilities'].tolist()
+            if 'UtilitySum' in i:
+                utilitySum = datasetPath['UtilitySum'].tolist()
+            for k in range(len(data)):
+                self.transactions.append(self.createTransaction(data[k], utilities[k], utilitySum[k]))
+        if isinstance(datasetPath, str):
+            if _ab._validators.url(datasetPath):
+                data = _ab._urlopen(datasetPath)
+                for line in data:
+                    line = line.decode("utf-8")
+                    trans_list = line.strip().split(':')
+                    transactionUtility = int(trans_list[1])
+                    itemsString = trans_list[0].strip().split(self.sep)
+                    itemsString = [x for x in itemsString if x]
+                    utilityString = trans_list[2].strip().split(self.sep)
+                    utilityString = [x for x in utilityString if x]
+                    self.transactions.append(self.createTransaction(itemsString, utilityString, transactionUtility))
+            else:
+                try:
+                    with open(datasetPath, 'r', encoding='utf-8') as f:
+                        for line in f:
+                            trans_list = line.strip().split(':')
+                            transactionUtility = int(trans_list[1])
+                            itemsString = trans_list[0].strip().split(self.sep)
+                            itemsString = [x for x in itemsString if x]
+                            utilityString = trans_list[2].strip().split(self.sep)
+                            utilityString = [x for x in utilityString if x]
+                            self.transactions.append(self.createTransaction(itemsString, utilityString, transactionUtility))
+                except IOError:
+                    print("File Not Found")
+                    quit()
+
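+    # A hedged illustration (an editorial addition): each input line is expected in the form
+    # items : transactionUtility : utilities, where items and utilities are separated by the chosen
+    # separator (tab by default). A hypothetical line "a<TAB>b<TAB>c:10:3<TAB>4<TAB>3" parses to
+    # items ['a', 'b', 'c'], transactionUtility 10 and utilities [3, 4, 3].
+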
+    def createTransaction(self, items: List[str], utilities: List[str], utilitySum: int) -> _Transaction:
+        """
+        A method to create Transaction from dataset given
+
+        :param items: the items of a single transaction
+        :type items: list
+        :param utilities: the utilities of the items
+        :type utilities: list
+        :param utilitySum: the total utility of the transaction
+        :type utilitySum: int
+        :return: a Transaction from given dataset
+        :rtype: _Transaction
+        """
+        transactionUtility = utilitySum
+        itemsString = items
+        utilityString = utilities
+        items = []
+        utilities = []
+        for idx, item in enumerate(itemsString):
+            if self.strToInt.get(item) is None:
+                self.strToInt[item] = self.cnt
+                self.intToStr[self.cnt] = item
+                self.cnt += 1
+            item_int = self.strToInt.get(item)
+            if item_int > self.maxItem:
+                self.maxItem = item_int
+            items.append(item_int)
+            utilities.append(int(utilityString[idx]))
+        return _Transaction(items, utilities, transactionUtility)
+
+    def getMaxItem(self) -> int:
+        """
+        A method to return name of the largest item
+
+        :return: the name of the largest item in the dataset
+        :rtype: int
+        """
+        return self.maxItem
+
+    def getTransactions(self) -> List[_Transaction]:
+        """
+        A method to return transactions from database
+
+        :return: the list of transactions in the database
+        :rtype: list
+        """
+        return self.transactions
+
+
+
+[docs] +class HUFIM(_ab._utilityPatterns): + """ + :Description: HUFIM (High Utility Frequent Itemset Miner) algorithm helps us to mine High Utility Frequent ItemSets (HUFIs) from transactional databases. + + + :Reference: Kiran, R.U., Reddy, T.Y., Fournier-Viger, P., Toyoda, M., Reddy, P.K., & Kitsuregawa, M. (2019). + Efficiently Finding High Utility-Frequent Itemsets Using Cutoff and Suffix Utility. PAKDD 2019. + DOI: 10.1007/978-3-030-16145-3_15 + + + :param iFile: str : + Name of the Input file to mine the complete set of high utility frequent patterns + :param oFile: str : + Name of the output file to store the complete set of high utility frequent patterns + :param minSup: int or float or str : + The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup as expressed in count. Otherwise, it will be treated as float. + :param minUtil: int : + The user given minUtil value. + :param candidateCount: int + Number of candidates + :param maxMemory: int + Maximum memory used by this program for running + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator. + + + :Attributes: + + iFile : file + Name of the input file to mine complete set of patterns + oFile : file + Name of the output file to store complete set of patterns + memoryRSS : float + To store the total amount of RSS memory consumed by the program + startTime:float + To record the start time of the mining process + endTime:float + To record the completion time of the mining process + minUtil : int + The user given minUtil value + minSup : float + The user given minSup value + highUtilityFrequentItemSets: map + set of high utility frequent itemSets + candidateCount: int + Number of candidates + utilityBinArrayLU: map + A map to hold the local utility values of the items in the database + utilityBinArraySU: map + A map to hold the subtree utility values of the items in the database + oldNamesToNewNames: map + A map which contains old names, new names of items as key value pairs + newNamesToOldNames: map + A map which contains new names, old names of items as key value pairs + singleItemSetsSupport: map + A map which maps from single itemsets (items) to their support + singleItemSetsUtility: map + A map which maps from single itemsets (items) to their utilities + maxMemory: float + Maximum memory used by this program for running + patternCount: int + Number of HUFIs found + itemsToKeep: list + keep only the promising items, i.e., items that can extend other items to form HUFIs + itemsToExplore: list + list of items that need to be explored + + :Methods: + + mine() + Mining process will start from here + getPatterns() + Complete set of patterns will be retrieved with this function + save(oFile) + Complete set of patterns will be written to an output file + getPatternsAsDataFrame() + Complete set of patterns will be loaded into a dataframe + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function +
backTrackingHUFIM(transactionsOfP, itemsToKeep, itemsToExplore, prefixLength) + A method to mine the HUFIs recursively + useUtilityBinArraysToCalculateUpperBounds(transactionsPe, j, itemsToKeep) + A method to calculate the sub-tree utility and local utility of all items that can extend itemSet P and e + output(tempPosition, utility) + A method to output a high-utility frequent itemSet to file or memory depending on what the user chose + isEqual(transaction1, transaction2) + A method to check if two transactions are identical + useUtilityBinArrayToCalculateSubtreeUtilityFirstTime(dataset) + A method to calculate the sub tree utility values for single items + sortDatabase(self, transactions) + A method to sort transactions + sortTransaction(self, trans1, trans2) + A method to sort transactions + useUtilityBinArrayToCalculateLocalUtilityFirstTime(self, dataset) + A method to calculate local utility values for single itemSets + + **Executing the code on terminal** + -------------------------------------------- + + .. code-block:: console + + Format: + + (.venv) $ python3 HUFIM.py <inputFile> <outputFile> <minUtil> <minSup> + + Example Usage: + + (.venv) $ python3 HUFIM.py sampleTDB.txt output.txt 35 20 + + .. note:: minSup will be considered in percentage of database transactions + + + **Sample run of importing the code** + ----------------------------------------------- + .. code-block:: python + + from PAMI.highUtilityFrequentPattern.basic import HUFIM as alg + + obj = alg.HUFIM("input.txt", 35, 20) + + obj.mine() + + Patterns = obj.getPatterns() + + print("Total number of high utility frequent Patterns:", len(Patterns)) + + obj.save("output") + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + **Credits:** + -------------------- + The complete program was written by Pradeep Pallikila under the supervision of Professor Rage Uday Kiran. + + """ + + _highUtilityFrequentItemSets = [] + _candidateCount = 0 + _utilityBinArrayLU = {} + _utilityBinArraySU = {} + _oldNamesToNewNames = {} + _newNamesToOldNames = {} + _singleItemSetsSupport = {} + _singleItemSetsUtility = {} + _strToInt = {} + _intToStr = {} + _temp = [0]*5000 + _patternCount = int() + _maxMemory = 0 + _startTime = float() + _endTime = float() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _nFile = " " + _lno = 0 + _sep = "\t" + _minUtil = 0 + _minSup = 0 + _memoryUSS = float() + _memoryRSS = float() + + def __init__(self, iFile: str, minUtil: Union[int, float], minSup: Union[int, float], sep: str="\t") -> None: + super().__init__(iFile, minUtil, minSup, sep) + + def _convert(self, value) -> Union[int, float]: + """ + To convert the given user specified value + + :param value: user specified value + :type value: int or float or str + :return: converted value + :rtype: int or float + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self._dataset.getTransactions()) * value) + if type(value) is str: + if '.' in value: + value = float(value) + value = (len(self._dataset.getTransactions()) * value) + else: + value = int(value) + return value +
+[docs] + @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.") + def startMine(self) -> None: + """ + High Utility Frequent Pattern mining start here + + :return: None + """ + self.mine()
+ + +
+[docs] + def mine(self) -> None: + """ + High Utility Frequent Pattern mining start here + + :return: None + """ + self._startTime = _ab._time.time() + self._finalPatterns = {} + self._dataset = [] + self._dataset = _Dataset(self._iFile, self._sep) + self._singleItemSetsSupport = _ab._defaultdict(int) + self._singleItemSetsUtility = _ab._defaultdict(int) + self._useUtilityBinArrayToCalculateLocalUtilityFirstTime(self._dataset) + self._minUtil = int(self._minUtil) + self._minSup = self._convert(self._minSup) + itemsToKeep = [] + for key in self._utilityBinArrayLU.keys(): + if self._utilityBinArrayLU[key] >= self._minUtil and self._singleItemSetsSupport[key] >= self._minSup: + itemsToKeep.append(key) + itemsToKeep = sorted(itemsToKeep, key=lambda x: self._singleItemSetsUtility[x], reverse=True) + currentName = 1 + for idx, item in enumerate(itemsToKeep): + self._oldNamesToNewNames[item] = currentName + self._newNamesToOldNames[currentName] = item + itemsToKeep[idx] = currentName + currentName += 1 + for transaction in self._dataset.getTransactions(): + transaction.removeUnpromisingItems(self._oldNamesToNewNames) + self._sortDatabase(self._dataset.getTransactions()) + emptyTransactionCount = 0 + for transaction in self._dataset.getTransactions(): + if len(transaction.getItems()) == 0: + emptyTransactionCount += 1 + self._dataset.transactions = self._dataset.transactions[emptyTransactionCount:] + # calculating suffix utility values + totalUtility = 0 + for item in itemsToKeep: + totalUtility += self._singleItemSetsUtility[self._newNamesToOldNames[item]] + # piItems + piItems = [] + for item in itemsToKeep: + if totalUtility >= self._minUtil: + piItems.append(item) + totalUtility -= self._singleItemSetsUtility[self._newNamesToOldNames[item]] + else: + break + self._useUtilityBinArrayToCalculateSubtreeUtilityFirstTime(self._dataset) + itemsToExplore = [] + for item in piItems: + if self._utilityBinArraySU[item] >= self._minUtil: + itemsToExplore.append(item) + self._backTrackingHUFIM(self._dataset.getTransactions(), itemsToKeep, itemsToExplore, 0) + self._endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = float() + self._memoryRSS = float() + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss + print("High Utility Frequent patterns were generated successfully using HUFIM algorithm")
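+# A hedged note (an editorial addition): _convert interprets thresholds by type, so with a
+# hypothetical database of 100 transactions:
+#
+#             obj._convert(10)      # -> 10    (int: kept as an absolute count)
+#             obj._convert(0.1)     # -> 10.0  (float: proportion of database size, 100 * 0.1)
+#             obj._convert("0.1")   # -> 10.0  (string containing '.': parsed as a proportion)
+#             obj._convert("10")    # -> 10    (string without '.': parsed as a count)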
+ + + def _backTrackingHUFIM(self, transactionsOfP: List[_Transaction], itemsToKeep: List[int], itemsToExplore: List[int], prefixLength: int) -> None: + """ + A method to mine the HUFIs Recursively + + :param transactionsOfP: the list of transactions containing the current prefix P + :type transactionsOfP: list + :param itemsToKeep: the list of secondary items in the p-projected database + :type itemsToKeep: list + :param itemsToExplore: the list of primary items in the p-projected database + :type itemsToExplore: list + :param prefixLength: current prefixLength + :type prefixLength: int + :return: None + """ + # print("###############") + # print("P is", [self.dataset.intToStr.get(x) for x in self.temp[:prefixLength]]) + # print("items to explore", [self.dataset.intToStr.get(x) for x in [self.newNamesToOldNames[y] for y in itemsToExplore]]) + # print("items to keep", [self.dataset.intToStr.get(x) for x in [self.newNamesToOldNames[y] for y in itemsToKeep]]) + # print("--------------") + self._candidateCount += len(itemsToExplore) + for idx, e in enumerate(itemsToExplore): + # print("exploring item", self.dataset.intToStr.get(self.newNamesToOldNames[e])) + transactionsPe = [] + utilityPe = 0 + supportPe = 0 + previousTransaction = [] + consecutiveMergeCount = 0 + for transaction in transactionsOfP: + items = transaction.getItems() + if e in items: + positionE = items.index(e) + if transaction.getLastPosition() == positionE: + utilityPe += transaction.getUtilities()[positionE] + transaction.prefixUtility + supportPe += transaction.getSupport() + else: + projectedTransaction = transaction.projectTransaction(positionE) + utilityPe += projectedTransaction.prefixUtility + if previousTransaction == []: + previousTransaction = projectedTransaction + elif self._isEqual(projectedTransaction, previousTransaction): + if consecutiveMergeCount == 0: + items = previousTransaction.items[previousTransaction.offset:] + utilities = previousTransaction.utilities[previousTransaction.offset:] + support = previousTransaction.getSupport() + itemsCount = len(items) + positionPrevious = 0 + positionProjection = projectedTransaction.offset + while positionPrevious < itemsCount: + utilities[positionPrevious] += projectedTransaction.utilities[positionProjection] + positionPrevious += 1 + positionProjection += 1 + previousTransaction.prefixUtility += projectedTransaction.prefixUtility + sumUtilities = previousTransaction.prefixUtility + previousTransaction = _Transaction(items, utilities, previousTransaction.transactionUtility + projectedTransaction.transactionUtility) + previousTransaction.prefixUtility = sumUtilities + previousTransaction.support = support + previousTransaction.support += projectedTransaction.getSupport() + else: + positionPrevious = 0 + positionProjected = projectedTransaction.offset + itemsCount = len(previousTransaction.items) + while positionPrevious < itemsCount: + previousTransaction.utilities[positionPrevious] += projectedTransaction.utilities[ + positionProjected] + positionPrevious += 1 + positionProjected += 1 + previousTransaction.transactionUtility += projectedTransaction.transactionUtility + previousTransaction.prefixUtility += projectedTransaction.prefixUtility + previousTransaction.support += projectedTransaction.getSupport() + consecutiveMergeCount += 1 + else: + transactionsPe.append(previousTransaction) + supportPe += previousTransaction.getSupport() + previousTransaction = projectedTransaction + consecutiveMergeCount = 0 + transaction.offset = positionE + if previousTransaction != 
[]: + transactionsPe.append(previousTransaction) + supportPe += previousTransaction.getSupport() + # print("support is", supportPe) + self._temp[prefixLength] = self._newNamesToOldNames[e] + if (utilityPe >= self._minUtil) and (supportPe >= self._minSup): + self._output(prefixLength, utilityPe, supportPe) + if supportPe >= self._minSup: + self._useUtilityBinArraysToCalculateUpperBounds(transactionsPe, idx, itemsToKeep) + newItemsToKeep = [] + newItemsToExplore = [] + for l in range(idx + 1, len(itemsToKeep)): + itemK = itemsToKeep[l] + if self._utilityBinArraySU[itemK] >= self._minUtil: + newItemsToExplore.append(itemK) + newItemsToKeep.append(itemK) + elif self._utilityBinArrayLU[itemK] >= self._minUtil: + newItemsToKeep.append(itemK) + if len(transactionsPe) != 0: + self._backTrackingHUFIM(transactionsPe, newItemsToKeep, newItemsToExplore, prefixLength + 1) + + def _useUtilityBinArraysToCalculateUpperBounds(self, transactionsPe: List[_Transaction], j: int, itemsToKeep: List[int]) -> None: + """ + A method to calculate the subtree utility and local utility of all items that can extend itemSet P U {e} + + :Attributes: + + :param transactionsPe: transactions the projected database for P U {e} + :type transactionsPe: list or Dataset + :param j:the position of j in the list of promising items + :type j:int + :param itemsToKeep :the list of promising items + :type itemsToKeep: list or Dataset + :return: None + """ + for i in range(j + 1, len(itemsToKeep)): + item = itemsToKeep[i] + self._utilityBinArrayLU[item] = 0 + self._utilityBinArraySU[item] = 0 + for transaction in transactionsPe: + sumRemainingUtility = 0 + i = len(transaction.getItems()) - 1 + while i >= transaction.offset: + item = transaction.getItems()[i] + if item in itemsToKeep: + sumRemainingUtility += transaction.getUtilities()[i] + self._utilityBinArraySU[item] += sumRemainingUtility + transaction.prefixUtility + self._utilityBinArrayLU[item] += transaction.transactionUtility + transaction.prefixUtility + i -= 1 + + def _output(self, tempPosition: int, utility: int, support: int): + """ + Method to print itemSets + + :Attributes: + + :param tempPosition: position of last item + :type tempPosition : int + :param utility: total utility of itemSet + :type utility: int + :param support: support of an itemSet + :type support: int + """ + self._patternCount += 1 + s1 = str() + for i in range(0, tempPosition+1): + s1 += self._dataset.intToStr.get((self._temp[i])) + if i != tempPosition: + s1 += "\t" + self._finalPatterns[s1] = [utility, support] + + def _isEqual(self, transaction1: _Transaction, transaction2: _Transaction) -> bool: + """ + A method to Check if two transaction are identical + + :param transaction1: the first transaction + :type transaction1: Trans + :param transaction2: the second transaction + :type transaction2: Trans + :return : whether both are identical or not + :rtype: bool + """ + length1 = len(transaction1.items) - transaction1.offset + length2 = len(transaction2.items) - transaction2.offset + if length1 != length2: + return False + position1 = transaction1.offset + position2 = transaction2.offset + while position1 < len(transaction1.items): + if transaction1.items[position1] != transaction2.items[position2]: + return False + position1 += 1 + position2 += 1 + return True + + def _useUtilityBinArrayToCalculateSubtreeUtilityFirstTime(self, dataset: _Dataset) -> None: + """ + Scan the initial database to calculate the subtree utility of each item using a utility-bin array + + :param dataset: the transaction 
database + :type dataset: Dataset + :return : None + """ + for transaction in dataset.getTransactions(): + sumSU = 0 + i = len(transaction.getItems()) - 1 + while i >= 0: + item = transaction.getItems()[i] + currentUtility = transaction.getUtilities()[i] + sumSU += currentUtility + if item in self._utilityBinArraySU.keys(): + self._utilityBinArraySU[item] += sumSU + else: + self._utilityBinArraySU[item] = sumSU + i -= 1 + + def _sortDatabase(self, transactions: List[_Transaction]) -> None: + """ + A Method to sort transaction + + :param transactions: transactions of items + :type transactions: list + :return: None + """ + compareItems = _ab._functools.cmp_to_key(self._sortTransaction) + transactions.sort(key=compareItems) + + def _sortTransaction(self, trans1: _Transaction, trans2: _Transaction) -> int: + """ + A Method to sort transaction + + :param trans1: the first transaction + :type trans1: Trans + :param trans2:the second transaction + :type trans2: Trans + :return: sorted transaction + :rtype: int + """ + transItemsX = trans1.getItems() + transItemsY = trans2.getItems() + pos1 = len(transItemsX) - 1 + pos2 = len(transItemsY) - 1 + if len(transItemsX) < len(transItemsY): + while pos1 >= 0: + sub = transItemsY[pos2] - transItemsX[pos1] + if sub != 0: + return sub + pos1 -= 1 + pos2 -= 1 + return -1 + elif len(transItemsX) > len(transItemsY): + while pos2 >= 0: + sub = transItemsY[pos2] - transItemsX[pos1] + if sub != 0: + return sub + pos1 -= 1 + pos2 -= 1 + return 1 + else: + while pos2 >= 0: + sub = transItemsY[pos2] - transItemsX[pos1] + if sub != 0: + return sub + pos1 -= 1 + pos2 -= 1 + return 0 + + def _useUtilityBinArrayToCalculateLocalUtilityFirstTime(self, dataset: _Dataset) -> None: + """ + A method to calculate local utility of single itemSets + + :param dataset: the transaction database + :type dataset: databases + :return: None + """ + for transaction in dataset.getTransactions(): + for idx, item in enumerate(transaction.getItems()): + self._singleItemSetsSupport[item] += 1 + self._singleItemSetsUtility[item] += transaction.getUtilities()[idx] + if item in self._utilityBinArrayLU: + self._utilityBinArrayLU[item] += transaction.transactionUtility + else: + self._utilityBinArrayLU[item] = transaction.transactionUtility + +
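+    # A small demonstration (editor's sketch, hypothetical names) of the
+    # comparator style used by _sortTransaction above: it is a three-way
+    # comparator that compares two transactions from their last items backwards,
+    # so it must be adapted with functools.cmp_to_key before list.sort can use
+    # it, which is exactly what _sortDatabase does.
+    #
+    #     import functools
+    #
+    #     def compareBackwards(t1, t2):
+    #         p1, p2 = len(t1) - 1, len(t2) - 1
+    #         while p1 >= 0 and p2 >= 0:
+    #             if t2[p2] != t1[p1]:
+    #                 return t2[p2] - t1[p1]   # larger tail item sorts first
+    #             p1 -= 1
+    #             p2 -= 1
+    #         return len(t1) - len(t2)         # on a common suffix, shorter first
+    #
+    #     data = [[1, 3], [2, 3], [1, 2]]
+    #     data.sort(key=functools.cmp_to_key(compareBackwards))
+    #     # data == [[2, 3], [1, 3], [1, 2]]
+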
+[docs]
+    def getPatternsAsDataFrame(self) -> _ab._pd.DataFrame:
+        """
+        Storing final patterns in a dataframe
+
+        :return: returning patterns in a dataframe
+        :rtype: pd.DataFrame
+        """
+        data = []
+        for a, b in self._finalPatterns.items():
+            data.append([a.replace('\t', ' '), b[0], b[1]])
+        dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Utility', 'Support'])
+
+        return dataFrame
+ + +
+[docs] + def getPatterns(self) -> Dict[str, List[Union[int, float]]]: + """ + Function to send the set of patterns after completion of the mining process + + :return: returning patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs]
+    def save(self, outFile: str) -> None:
+        """
+        Complete set of frequent patterns will be written to an output file
+
+        :param outFile: name of the output file
+        :type outFile: csv file
+        :return: None
+        """
+        self._oFile = outFile
+        with open(self._oFile, 'w+') as writer:
+            for x, y in self._finalPatterns.items():
+                patternsAndSupport = x.strip() + ":" + str(y[0]) + ":" + str(y[1])
+                writer.write("%s \n" % patternsAndSupport)
+ + +
+[docs] + def getMemoryUSS(self) -> float: + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self) -> float: + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self) -> float: + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + return self._endTime-self._startTime
+ + +
+[docs] + def printResults(self) -> None: + """ + This function is used to print the results + """ + print("Total number of High Utility Frequent Patterns:", len(self.getPatterns())) + print("Total Memory in USS:", self.getMemoryUSS()) + print("Total Memory in RSS", self.getMemoryRSS()) + print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+
+if __name__ == '__main__':
+    _ap = str()
+    if len(_ab._sys.argv) == 5 or len(_ab._sys.argv) == 6:
+        if len(_ab._sys.argv) == 6:    # includes separator
+            _ap = HUFIM(_ab._sys.argv[1], int(_ab._sys.argv[3]), float(_ab._sys.argv[4]), _ab._sys.argv[5])
+        if len(_ab._sys.argv) == 5:    # takes "\t" as the separator
+            _ap = HUFIM(_ab._sys.argv[1], int(_ab._sys.argv[3]), float(_ab._sys.argv[4]))
+        _ap.mine()    # a single call; startMine() is deprecated and would mine twice
+        print("Total number of High Utility Frequent Patterns:", len(_ap.getPatterns()))
+        _ap.save(_ab._sys.argv[2])
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        print("Error! The number of input parameters does not match the total number of parameters provided")
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/highUtilityGeoreferencedFrequentPattern/basic/SHUFIM.html b/sphinx/_build/html/_modules/PAMI/highUtilityGeoreferencedFrequentPattern/basic/SHUFIM.html new file mode 100644 index 000000000..6a9594eee --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/highUtilityGeoreferencedFrequentPattern/basic/SHUFIM.html @@ -0,0 +1,1157 @@ + + + + + + PAMI.highUtilityGeoreferencedFrequentPattern.basic.SHUFIM — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for PAMI.highUtilityGeoreferencedFrequentPattern.basic.SHUFIM

+# Spatial High Utility Frequent ItemSet Mining (SHUFIM) aims to discover all itemSets in a spatioTemporal database
+# that satisfy the user-specified minimum utility, minimum support and maximum distance constraints
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#             from PAMI.highUtilityGeoreferencedFrequentPattern.basic import SHUFIM as alg
+#
+#             obj=alg.SHUFIM("input.txt","Neighbours.txt",35,20)
+#
+#             obj.mine()
+#
+#             patterns = obj.getPatterns()
+#
+#             print("Total number of Spatial high utility frequent Patterns:", len(patterns))
+#
+#             obj.save("output")
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+from PAMI.highUtilityGeoreferencedFrequentPattern.basic import abstract as _ab
+from functools import cmp_to_key as _comToKey
+from deprecated import deprecated
+
+class _Transaction:
+    """
+    A class to store a transaction of a database
+
+    :Attributes:
+
+        items: list
+            A list of items in transaction 
+        utilities: list
+            A list of utilities of items in transaction
+        transactionUtility: int
+            represent total sum of all utilities in the database
+        pmus: list
+            represent the pmu (probable maximum utility) of each element in the transaction
+        prefixUtility:
+            prefix utility value of the transaction
+        offset:
+            an offset pointer, used by projected transactions
+        support:
+            maintains the support of the transaction
+    :Methods:
+
+        projectTransaction(offsetE):
+            A method to create a new projected Transaction from the existing one at offsetE
+        getItems():
+            return items in transaction
+        getUtilities():
+            return utilities in transaction
+        getPmus():
+            return pmus in transaction
+        getLastPosition():
+            return last position in a transaction
+        removeUnpromisingItems():
+            A method to remove unpromising items (those whose utility is lower than minUtil)
+        insertionSort():
+            A method to sort all items in the transaction
+        getSupport():
+            returns the support of the transaction
+    """
+    offset = 0
+    prefixUtility = 0
+    support = 1
+    
+    def __init__(self, items, utilities, transactionUtility, pmus=None):
+        self.items = items
+        self.utilities = utilities
+        self.transactionUtility = transactionUtility
+        if pmus is not None:
+            self.pmus = pmus
+        self.support = 1
+
+    def projectTransaction(self, offsetE):
+        """
+        A method to create new Transaction from existing till offsetE
+        :param offsetE: an offset over the original transaction for projecting the transaction
+        :type offsetE: int
+        """
+        newTransaction = _Transaction(self.items, self.utilities, self.transactionUtility)
+        utilityE = self.utilities[offsetE]
+        newTransaction.prefixUtility = self.prefixUtility + utilityE
+        newTransaction.transactionUtility = self.transactionUtility - utilityE
+        newTransaction.support = self.support
+        for i in range(self.offset, offsetE):
+            newTransaction.transactionUtility -= self.utilities[i]
+        newTransaction.offset = offsetE + 1
+        return newTransaction
+
+    def getItems(self):
+        """
+        A method to return items in transaction
+        """
+        return self.items
+
+    def getPmus(self):
+        """
+        A method to return pmus in transaction
+        """
+        return self.pmus
+
+    def getUtilities(self):
+        """
+        A method to return utilities in transaction
+        """
+        return self.utilities
+
+    # get the last position in this transaction
+    def getLastPosition(self):
+        """
+        A method to return last position in a transaction
+        """
+        return len(self.items) - 1
+
+    def getSupport(self):
+        """
+        A method to return support of a transaction (number of transactions in the original database having the items present in this transaction)
+        """
+        return self.support
+
+    def removeUnpromisingItems(self, oldNamesToNewNames):
+        """
+        A method to remove unpromising items (those whose utility is lower than minUtil)
+        :param oldNamesToNewNames: A map represent old names to new names
+        :type oldNamesToNewNames: map
+        """
+        tempItems = []
+        tempUtilities = []
+        for idx, item in enumerate(self.items):
+            if item in oldNamesToNewNames:
+                tempItems.append(oldNamesToNewNames[item])
+                tempUtilities.append(self.utilities[idx])
+            else:
+                self.transactionUtility -= self.utilities[idx]
+        self.items = tempItems
+        self.utilities = tempUtilities
+        self.insertionSort()
+
+    def insertionSort(self):
+        """
+        A method to sort the items of the transaction in ascending order
+        """
+        for i in range(1, len(self.items)):
+            key = self.items[i]
+            utilityJ = self.utilities[i]
+            j = i - 1
+            while j >= 0 and key < self.items[j]:
+                self.items[j + 1] = self.items[j]
+                self.utilities[j + 1] = self.utilities[j]
+                j -= 1
+            self.items[j + 1] = key
+            self.utilities[j + 1] = utilityJ
+
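+# A worked numeric example (editor's sketch) of projectTransaction above:
+# projecting on position offsetE moves the utility of item e into prefixUtility,
+# drops the utilities of any items skipped between the old offset and e from
+# transactionUtility, and points the new offset just past e.
+#
+#     t = _Transaction(items=[1, 2, 3], utilities=[4, 5, 6], transactionUtility=15)
+#     p = t.projectTransaction(1)      # project on item 2 (position 1)
+#     # p.prefixUtility      == 5      (utility of item 2)
+#     # p.transactionUtility == 6      (15 - 5 for item 2 - 4 for the skipped item 1)
+#     # p.offset             == 2      (the remaining suffix is item 3)
+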
+
+class _Dataset:
+    """
+    A class represent the list of transactions in this dataset
+
+    :Attributes:
+
+        transactions :
+            the list of transactions in this dataset
+        maxItem:
+            the largest item name
+        
+    :methods:
+
+        createTransaction(line):
+            Create a transaction object from a line from the input file
+        getMaxItem():
+            return Maximum Item
+        getTransactions():
+            return transactions in database
+
+    """
+    transactions = []
+    maxItem = 0
+    
+    def __init__(self, datasetPath, sep):
+        self.strToInt = {}
+        self.intToStr = {}
+        self.cnt = 1
+        self.sep = sep
+        self.transactions = []
+        self.createItemSets(datasetPath)
+
+    def createItemSets(self, datasetPath):
+        """
+        Storing the complete transactions of the database/input file in a database variable
+
+        :param datasetPath: Path to the input file
+
+        :type datasetPath: str
+        """
+        pmuString = None
+        if isinstance(datasetPath, _ab._pd.DataFrame):
+            utilities, data, utilitySum, pmuString = [], [], [], []
+            if datasetPath.empty:
+                print("its empty..")
+            i = datasetPath.columns.values.tolist()
+            if 'Transactions' in i:
+                data = datasetPath['Transactions'].tolist()
+            if 'Utilities' in i:
+                utilities = datasetPath['Utilities'].tolist()
+            if 'UtilitySum' in i:
+                utilitySum = datasetPath['UtilitySum'].tolist()
+            if 'pmuString' in i:
+                pmuString = datasetPath['pmuString'].tolist()
+            for k in range(len(data)):
+                self.transactions.append(self.createTransaction(data[k], utilities[k], utilitySum[k], pmuString[k]))
+        if isinstance(datasetPath, str):
+            if _ab._validators.url(datasetPath):
+                data = _ab._urlopen(datasetPath)
+                for line in data:
+                    line = line.decode("utf-8")
+                    trans_list = line.strip().split(':')
+                    transactionUtility = int(trans_list[1])
+                    itemsString = trans_list[0].strip().split(self.sep)
+                    itemsString = [x for x in itemsString if x]
+                    utilityString = trans_list[2].strip().split(self.sep)
+                    utilityString = [x for x in utilityString if x]
+                    if len(trans_list) == 4:
+                        pmuString = trans_list[3].strip().split(self.sep)
+                        pmuString = [x for x in pmuString if x]
+                    self.transactions.append(self.createTransaction(itemsString, utilityString, transactionUtility, pmuString))
+            else:
+                try:
+                    with open(datasetPath, 'r', encoding='utf-8') as f:
+                        for line in f:
+                            trans_list = line.strip().split(':')
+                            transactionUtility = int(trans_list[1])
+                            itemsString = trans_list[0].strip().split(self.sep)
+                            itemsString = [x for x in itemsString if x]
+                            utilityString = trans_list[2].strip().split(self.sep)
+                            utilityString = [x for x in utilityString if x]
+                            if len(trans_list) == 4:
+                                pmuString = trans_list[3].strip().split(self.sep)
+                                pmuString = [x for x in pmuString if x]
+                            self.transactions.append(
+                                self.createTransaction(itemsString, utilityString, transactionUtility, pmuString))
+                except IOError:
+                    print("File Not Found")
+                    quit()
+
+    def createTransaction(self, items, utilities, utilitySum, pmustring):
+        """
+        A method to create a Transaction object from the given fields
+        :param items: the items of the transaction
+        :type items: list
+        :param utilities: the utility of each item in the transaction
+        :type utilities: list
+        :param utilitySum: the total utility of the transaction
+        :type utilitySum: int
+        :param pmustring: the pmu value of each item in the transaction
+        :type pmustring: list
+        """
+        transactionUtility = utilitySum
+        itemsString = items
+        utilityString = utilities
+        pmuString = pmustring
+        items = []
+        utilities = []
+        pmus = []
+        for idx, item in enumerate(itemsString):
+            if (self.strToInt).get(item) is None:
+                self.strToInt[item] = self.cnt
+                self.intToStr[self.cnt] = item
+                self.cnt += 1
+            itemInt = self.strToInt.get(item)
+            if itemInt > self.maxItem:
+                self.maxItem = itemInt
+            items.append(itemInt)
+            utilities.append(int(utilityString[idx]))
+            if pmuString is not None:
+                pmus.append(int(pmuString[idx]))
+        if pmuString is None:
+            pmus = None
+        return _Transaction(items, utilities, transactionUtility, pmus)
+
+    def getMaxItem(self):
+        """
+        A method to return name of the largest item
+        """
+        return self.maxItem
+
+    def getTransactions(self):
+        """
+        A method to return transactions from database
+        """
+        return self.transactions
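+
+    # An editor's note on the input format parsed by createItemSets above: each
+    # line carries three, optionally four, colon-separated fields -- the items,
+    # the transaction utility, the per-item utilities and, when present, the
+    # per-item pmu values -- where the members of a field are separated by
+    # self.sep. A hypothetical line with sep='\t':
+    #
+    #     a<TAB>b<TAB>c:16:5<TAB>4<TAB>7:9<TAB>8<TAB>12
+    #
+    # i.e. the transaction {a, b, c} with total utility 16, item utilities
+    # u(a)=5, u(b)=4, u(c)=7, and pmu values 9, 8 and 12 respectively.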
+
+
+
+[docs] +class SHUFIM(_ab._utilityPatterns): + """ + :Description: Spatial High Utility Frequent ItemSet Mining (SHUFIM) aims to discover all itemSets in a spatioTemporal database + that satisfy the user-specified minimum utility, minimum support and maximum distance constraints + + :Reference: 10.1007/978-3-030-37188-3_17 + + :param iFile: str : + Name of the Input file to mine complete set of Geo-referenced frequent sequence patterns + :param oFile: str : + Name of the output file to store complete set of Geo-referenced frequent sequence patterns + :param minSup: int or float or str : + The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup is expressed in count. Otherwise, it will be treated as float. + :param minUtil: int : + The user given minUtil value. + :param candidateCount: int + Number of candidates + :param maxMemory: int + Maximum memory used by this program for running + :param nFile: str : + Name of the input file to mine complete set of Geo-referenced frequent sequence patterns + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + + :Attributes: + + iFile : file + Name of the input file to mine complete set of frequent patterns + nFile : file + Name of the Neighbours file that contain neighbours of items + oFile : file + Name of the output file to store complete set of frequent patterns + memoryRSS : float + To store the total amount of RSS memory consumed by the program + startTime:float + To record the start time of the mining process + endTime:float + To record the completion time of the mining process + minUtil : int + The user given minUtil + minSup : float + The user given minSup value + highUtilityFrequentSpatialItemSets: map + set of high utility itemSets + candidateCount: int + Number of candidates + utilityBinArrayLU: list + A map to hold the pmu values of the items in database + utilityBinArraySU: list + A map to hold the subtree utility values of the items is database + oldNamesToNewNames: list + A map to hold the subtree utility values of the items is database + newNamesToOldNames: list + A map to store the old name corresponding to new name + Neighbours : map + A dictionary to store the neighbours of a item + maxMemory: float + Maximum memory used by this program for running + patternCount: int + Number of SHUFI's (Spatial High Utility Frequent Itemsets) + itemsToKeep: list + keep only the promising items ie items whose supersets can be required patterns + itemsToExplore: list + keep items that subtreeUtility grater than minUtil + + :Methods : + + mine() + Mining process will start from here + getPatterns() + Complete set of patterns will be retrieved with this function + save(oFile) + Complete set of frequent patterns will be loaded in to a output file + getPatternsAsDataFrame() + Complete set of frequent patterns will be loaded in to a dataframe + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function + calculateNeighbourIntersection(self, prefixLength) + A method to return common Neighbours of items + backtrackingEFIM(transactionsOfP, 
itemsToKeep, itemsToExplore, prefixLength) + A method to mine the SHUIs Recursively + useUtilityBinArraysToCalculateUpperBounds(transactionsPe, j, itemsToKeep, neighbourhoodList) + A method to calculate the sub-tree utility and local utility of all items that can extend itemSet P and e + output(tempPosition, utility) + A method ave a high-utility itemSet to file or memory depending on what the user chose + isEqual(transaction1, transaction2) + A method to Check if two transaction are identical + intersection(lst1, lst2) + A method that return the intersection of 2 list + useUtilityBinArrayToCalculateSubtreeUtilityFirstTime(dataset) + Scan the initial database to calculate the subtree utility of each items using a utility-bin array + sortDatabase(self, transactions) + A Method to sort transaction in the order of PMU + sortTransaction(self, trans1, trans2) + A Method to sort transaction in the order of PMU + useUtilityBinArrayToCalculateLocalUtilityFirstTime(self, dataset) + A method to scan the database using utility bin array to calculate the pmus + + **Executing the code on terminal :** + ----------------------------------------- + + .. code-block:: console + + Format: + + (.venv) $ python3 SHUFIM.py <inputFile> <outputFile> <Neighbours> <minUtil> <minSup> <sep> + + Example Usage: + + (.venv) $ python3 SHUFIM.py sampleTDB.txt output.txt sampleN.txt 35 20 + + .. note:: minSup will be considered in percentage of database transactions + + + **Sample run of importing the code:** + ----------------------------------------- + .. code-block:: python + + from PAMI.highUtilityGeoreferencedFrequentPattern.basic import SHUFIM as alg + + obj=alg.SHUFIM("input.txt","Neighbours.txt",35,20) + + obj.mine() + + patterns = obj.getPatterns() + + print("Total number of Spatial high utility frequent Patterns:", len(patterns)) + + obj.save("output") + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + **Credits:** + --------------------- + + The complete program was written by Pradeep Pallikila under the supervision of Professor Rage Uday Kiran. + + """ + _candidateCount = 0 + _utilityBinArrayLU = {} + _utilityBinArraySU = {} + _oldNamesToNewNames = {} + _newNamesToOldNames = {} + _singleItemSetsSupport = {} + _singleItemSetsUtility = {} + _strToint = {} + _intTostr = {} + _Neighbours = {} + _temp = [0] * 5000 + _maxMemory = 0 + _startTime = float() + _endTime = float() + _minSup = str() + _maxPer = float() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _nFile = " " + _sep = "\t" + _minUtil = 0 + _memoryUSS = float() + _memoryRSS = float() + + def __init__(self, iFile, nFile, minUtil, minSup, sep="\t"): + super().__init__(iFile, nFile, minUtil, minSup, sep) + + def _convert(self, value): + """ + To convert the type of user specified minSup value + + :param value: user specified minSup value + :type value: int o float or str + :return: converted type + :rtype: float + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self._dataset.getTransactions()) * value) + if type(value) is str: + if '.' in value: + value = float(value) + value = (len(self._dataset.getTransactions()) * value) + else: + value = int(value) + return value + +
+[docs]
+    @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.")
+    def startMine(self):
+        """
+        High Utility Frequent Pattern mining starts from here
+        """
+        self.mine()
+ + +
+[docs] + def mine(self): + """ + High Utility Frequent Pattern mining start here + """ + self._startTime = _ab._time.time() + self._patternCount = 0 + self._finalPatterns = {} + self._dataset = _Dataset(self._iFile, self._sep) + self._singleItemSetsSupport = _ab._defaultdict(int) + self._singleItemSetsUtility = _ab._defaultdict(int) + self._minUtil = int(self._minUtil) + self._minSup = self._convert(self._minSup) + with open(self._nFile, 'r') as o: + lines = o.readlines() + for line in lines: + line = line.split("\n")[0] + line_split = line.split(self._sep) + item = self._dataset.strToInt.get(line_split[0]) + lst = [] + for i in range(1, len(line_split)): + lst.append(self._dataset.strToInt.get(line_split[i])) + self._Neighbours[item] = lst + o.close() + InitialMemory = _ab._psutil.virtual_memory()[3] + self._useUtilityBinArrayToCalculateLocalUtilityFirstTime(self._dataset) + _itemsToKeep = [] + for key in self._utilityBinArrayLU.keys(): + if self._utilityBinArrayLU[key] >= self._minUtil and self._singleItemSetsSupport[key] >= self._minSup: + _itemsToKeep.append(key) + # sorting items in decreasing order of their utilities + _itemsToKeep = sorted(_itemsToKeep, key=lambda x: self._singleItemSetsUtility[x], reverse=True) + _currentName = 1 + for idx, item in enumerate(_itemsToKeep): + self._oldNamesToNewNames[item] = _currentName + self._newNamesToOldNames[_currentName] = item + _itemsToKeep[idx] = _currentName + _currentName += 1 + for transaction in self._dataset.getTransactions(): + transaction.removeUnpromisingItems(self._oldNamesToNewNames) + self._sortDatabase(self._dataset.getTransactions()) + _emptyTransactionCount = 0 + for transaction in self._dataset.getTransactions(): + if len(transaction.getItems()) == 0: + _emptyTransactionCount += 1 + self._dataset.transactions = self._dataset.transactions[_emptyTransactionCount:] + # calculating neighborhood suffix utility values + _secondary = [] + for idx, item in enumerate(_itemsToKeep): + _cumulativeUtility = self._singleItemSetsUtility[self._newNamesToOldNames[item]] + if self._newNamesToOldNames[item] in self._Neighbours: + neighbors = [self._oldNamesToNewNames[y] for y in self._Neighbours[self._newNamesToOldNames[item]] if y in self._oldNamesToNewNames] + for i in range(idx+1, len(_itemsToKeep)): + _nextItem = _itemsToKeep[i] + if _nextItem in neighbors: + _cumulativeUtility += self._singleItemSetsUtility[self._newNamesToOldNames[_nextItem]] + if _cumulativeUtility >= self._minUtil: + _secondary.append(item) + self._useUtilityBinArrayToCalculateSubtreeUtilityFirstTime(self._dataset) + _itemsToExplore = [] + for item in _secondary: + if self._utilityBinArraySU[item] >= self._minUtil: + _itemsToExplore.append(item) + _commonitems = [] + for i in range(self._dataset.maxItem): + _commonitems.append(i) + self._backtrackingEFIM(self._dataset.getTransactions(), _itemsToKeep, _itemsToExplore, 0) + _finalMemory = _ab._psutil.virtual_memory()[3] + memory = (_finalMemory - InitialMemory) / 10000 + if memory > self._maxMemory: + self._maxMemory = memory + self._endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = float() + self._memoryRSS = float() + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss + print('Spatial High Utility Frequent Itemsets generated successfully using SHUFIM algorithm')
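+    # An editor's note on the neighbourhood file read at the start of mine()
+    # above: each line names an item followed by its spatial neighbours,
+    # separated by self._sep. A hypothetical fragment with sep='\t':
+    #
+    #     a<TAB>b<TAB>c        # the neighbours of a are b and c
+    #     b<TAB>a              # the neighbours of b are just a
+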
+ + + def _backtrackingEFIM(self, transactionsOfP, itemsToKeep, itemsToExplore, prefixLength): + """ + A method to mine the SHUFIs Recursively + :param transactionsOfP: the list of transactions containing the current prefix P + :type transactionsOfP: list + :param itemsToKeep: the list of secondary items in the p-projected database + :type itemsToKeep: list + :param itemsToExplore: the list of primary items in the p-projected database + :type itemsToExplore: list + :param prefixLength: current prefixLength + :type prefixLength: int + """ + self._candidateCount += len(itemsToExplore) + for idx, e in enumerate(itemsToExplore): + initialMemory = _ab._psutil.virtual_memory()[3] + transactionsPe = [] + utilityPe = 0 + supportPe = 0 + previousTransaction = [] + consecutiveMergeCount = 0 + for transaction in transactionsOfP: + items = transaction.getItems() + if e in items: + positionE = items.index(e) + if transaction.getLastPosition() == positionE: + utilityPe += transaction.getUtilities()[positionE] + transaction.prefixUtility + supportPe += transaction.getSupport() + else: + projectedTransaction = transaction.projectTransaction(positionE) + utilityPe += projectedTransaction.prefixUtility + if previousTransaction == []: + previousTransaction = projectedTransaction + elif self._isEqual(projectedTransaction, previousTransaction): + if consecutiveMergeCount == 0: + items = previousTransaction.items[previousTransaction.offset:] + utilities = previousTransaction.utilities[previousTransaction.offset:] + support = previousTransaction.getSupport() + itemsCount = len(items) + positionPrevious = 0 + positionProjection = projectedTransaction.offset + while positionPrevious < itemsCount: + utilities[positionPrevious] += projectedTransaction.utilities[positionProjection] + positionPrevious += 1 + positionProjection += 1 + previousTransaction.prefixUtility += projectedTransaction.prefixUtility + sumUtilities = previousTransaction.prefixUtility + previousTransaction = _Transaction(items, utilities, previousTransaction.transactionUtility + projectedTransaction.transactionUtility) + previousTransaction.prefixUtility = sumUtilities + previousTransaction.support = support + previousTransaction.support += projectedTransaction.getSupport() + else: + positionPrevious = 0 + positionProjected = projectedTransaction.offset + itemsCount = len(previousTransaction.items) + while positionPrevious < itemsCount: + previousTransaction.utilities[positionPrevious] += projectedTransaction.utilities[ + positionProjected] + positionPrevious += 1 + positionProjected += 1 + previousTransaction.transactionUtility += projectedTransaction.transactionUtility + previousTransaction.prefixUtility += projectedTransaction.prefixUtility + previousTransaction.support += projectedTransaction.getSupport() + consecutiveMergeCount += 1 + else: + transactionsPe.append(previousTransaction) + supportPe += previousTransaction.getSupport() + previousTransaction = projectedTransaction + consecutiveMergeCount = 0 + transaction.offset = positionE + if previousTransaction != []: + transactionsPe.append(previousTransaction) + supportPe += previousTransaction.getSupport() + self._temp[prefixLength] = self._newNamesToOldNames[e] + if utilityPe >= self._minUtil and supportPe >= self._minSup: + self._output(prefixLength, utilityPe, supportPe) + if supportPe >= self._minSup: + neighbourhoodList = self._calculateNeighbourIntersection(prefixLength) + #print(neighbourhoodList) + self._useUtilityBinArraysToCalculateUpperBounds(transactionsPe, idx, itemsToKeep, 
neighbourhoodList) + newItemsToKeep = [] + newItemsToExplore = [] + for l in range(idx + 1, len(itemsToKeep)): + itemK = itemsToKeep[l] + if self._utilityBinArraySU[itemK] >= self._minUtil: + if itemK in neighbourhoodList: + newItemsToExplore.append(itemK) + newItemsToKeep.append(itemK) + elif self._utilityBinArrayLU[itemK] >= self._minUtil: + if itemK in neighbourhoodList: + newItemsToKeep.append(itemK) + self._backtrackingEFIM(transactionsPe, newItemsToKeep, newItemsToExplore, prefixLength + 1) + finalMemory = _ab._psutil.virtual_memory()[3] + memory = (finalMemory - initialMemory) / 10000 + if self._maxMemory < memory: + self._maxMemory = memory + + def _useUtilityBinArraysToCalculateUpperBounds(self, transactionsPe, j, itemsToKeep, neighbourhoodList): + """ + A method to calculate the subtree utility and local utility of all items that can extend itemSet P U {e} + + :Attributes: + + :param transactionsPe: transactions the projected database for P U {e} + :type transactionsPe: list + :param j:the position of j in the list of promising items + :type j:int + :param itemsToKeep :the list of promising items + :type itemsToKeep: list + :param neighbourhoodList : the list of promising items that can extend itemSet P U {e} + :type neighbourhoodList: list + + """ + for i in range(j + 1, len(itemsToKeep)): + item = itemsToKeep[i] + self._utilityBinArrayLU[item] = 0 + self._utilityBinArraySU[item] = 0 + for transaction in transactionsPe: + length = len(transaction.getItems()) + i = length - 1 + while i >= transaction.offset: + item = transaction.getItems()[i] + if item in itemsToKeep: + remainingUtility = 0 + if self._newNamesToOldNames[item] in self._Neighbours: + itemNeighbours = self._Neighbours[self._newNamesToOldNames[item]] + for k in range(i, length): + transaction_item = transaction.getItems()[k] + if self._newNamesToOldNames[transaction_item] in itemNeighbours and transaction_item in neighbourhoodList: + remainingUtility += transaction.getUtilities()[k] + + remainingUtility += transaction.getUtilities()[i] + self._utilityBinArraySU[item] += remainingUtility + transaction.prefixUtility + self._utilityBinArrayLU[item] += transaction.transactionUtility + transaction.prefixUtility + i -= 1 + + def _calculateNeighbourIntersection(self, prefixLength): + """ + A method to find common Neighbours + :param prefixLength: the prefix itemSet + :type prefixLength:int + """ + intersectionList = self._Neighbours.get(self._temp[0]) + for i in range(1, prefixLength+1): + intersectionList = self._intersection(self._Neighbours[self._temp[i]], intersectionList) + finalIntersectionList = [] + if intersectionList is None: + return finalIntersectionList + for item in intersectionList: + if item in self._oldNamesToNewNames: + finalIntersectionList.append(self._oldNamesToNewNames[item]) + return finalIntersectionList + + def _output(self, tempPosition, utility, support): + """ + A method save all high-utility itemSet to file or memory depending on what the user chose + :param tempPosition: position of last item + :type tempPosition : int + :param utility: total utility of itemSet + :type utility: int + :param support: support of an itemSet + :type support: int + """ + self._patternCount += 1 + s1 = str() + for i in range(0, tempPosition+1): + s1 += self._dataset.intToStr.get((self._temp[i])) + if i != tempPosition: + s1 += "\t" + self._finalPatterns[s1] = [utility, support] + + def _isEqual(self, transaction1, transaction2): + """ + A method to Check if two transaction are identical + :param transaction1: the 
first transaction + :type transaction1: Trans + :param transaction2: the second transaction + :type transaction2: Trans + :return : whether both are identical or not + :rtype: bool + """ + + length1 = len(transaction1.items) - transaction1.offset + length2 = len(transaction2.items) - transaction2.offset + if length1 != length2: + return False + position1 = transaction1.offset + position2 = transaction2.offset + while position1 < len(transaction1.items): + if transaction1.items[position1] != transaction2.items[position2]: + return False + position1 += 1 + position2 += 1 + return True + + def _intersection(self, lst1, lst2): + """ + A method that return the intersection of 2 list + + :param lst1: items neighbour to item1 + :type lst1: list + :param lst2: items neighbour to item2 + :type lst2: list + :return :intersection of two lists + :rtype : list + """ + temp = set(lst2) + lst3 = [value for value in lst1 if value in temp] + return lst3 + + def _useUtilityBinArrayToCalculateSubtreeUtilityFirstTime(self, dataset): + """ + Scan the initial database to calculate the subtree utility of each item using a utility-bin array + :param dataset: the transaction database + :type dataset: Dataset + """ + for transaction in dataset.getTransactions(): + items = transaction.getItems() + utilities = transaction.getUtilities() + for idx, item in enumerate(items): + if item not in self._utilityBinArraySU: + self._utilityBinArraySU[item] = 0 + if self._newNamesToOldNames[item] not in self._Neighbours: + self._utilityBinArraySU[item] += utilities[idx] + continue + i = idx + 1 + sumSu = utilities[idx] + while i < len(items): + if self._newNamesToOldNames[items[i]] in self._Neighbours[self._newNamesToOldNames[item]]: + sumSu += utilities[i] + i += 1 + self._utilityBinArraySU[item] += sumSu + + def _sortDatabase(self, transactions): + """ + A Method to sort transaction in the order of PMU + :param transactions: transaction of items + :type transactions: Transaction + :return: sorted transaction + :rtype: Trans + """ + cmp_items = _comToKey(self._sortTransaction) + transactions.sort(key=cmp_items) + + def _sortTransaction(self, trans1, trans2): + """ + A Method to sort transaction in the order of PMU + :param trans1: the first transaction + :type trans1: Trans + :param trans2:the second transaction + :type trans2: Trans + :return: sorted transaction + :rtype: Trans + """ + trans1_items = trans1.getItems() + trans2_items = trans2.getItems() + pos1 = len(trans1_items) - 1 + pos2 = len(trans2_items) - 1 + if len(trans1_items) < len(trans2_items): + while pos1 >= 0: + sub = trans2_items[pos2] - trans1_items[pos1] + if sub != 0: + return sub + pos1 -= 1 + pos2 -= 1 + return -1 + elif len(trans1_items) > len(trans2_items): + while pos2 >= 0: + sub = trans2_items[pos2] - trans1_items[pos1] + if sub != 0: + return sub + pos1 -= 1 + pos2 -= 1 + return 1 + else: + while pos2 >= 0: + sub = trans2_items[pos2] - trans1_items[pos1] + if sub != 0: + return sub + pos1 -= 1 + pos2 -= 1 + return 0 + + def _useUtilityBinArrayToCalculateLocalUtilityFirstTime(self, dataset): + """ + A method to scan the database using utility bin array to calculate the pmus + :param dataset: the transaction database + :type dataset: dataset + """ + for transaction in dataset.getTransactions(): + for idx, item in enumerate(transaction.getItems()): + self._singleItemSetsSupport[item] += 1 + self._singleItemSetsUtility[item] += transaction.getUtilities()[idx] + pmu = transaction.getUtilities()[idx] + if item in self._Neighbours: + neighbors = 
self._Neighbours[item]
+                    # use distinct loop variables so the outer 'item' is not clobbered
+                    for idx2, item2 in enumerate(transaction.getItems()):
+                        if item2 in neighbors:
+                            pmu += transaction.getUtilities()[idx2]
+                if item in self._utilityBinArrayLU:
+                    # self._utilityBinArrayLU[item] += transaction.getPmus()[idx]
+                    self._utilityBinArrayLU[item] += pmu
+                else:
+                    # self._utilityBinArrayLU[item] = transaction.getPmus()[idx]
+                    self._utilityBinArrayLU[item] = pmu
+
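+    # A toy illustration (editor's sketch) of the pmu accumulated above: for each
+    # occurrence of an item, its probable maximum utility adds its own utility
+    # plus the utilities of co-occurring items that are also its spatial
+    # neighbours; non-neighbours in the same transaction are ignored.
+    #
+    #     # transaction {a, b, c} with utilities 5, 4, 7; neighbours: a <-> b only
+    #     # contribution to pmu(a) from this transaction: 5 (own) + 4 (b) = 9
+    #     # c is not a neighbour of a, so its utility 7 is not added
+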
+[docs]
+    def getPatternsAsDataFrame(self):
+        """
+        Storing final patterns in a dataframe
+
+        :return: returning patterns in a dataframe
+        :rtype: pd.DataFrame
+        """
+        data = []
+        for a, b in self._finalPatterns.items():
+            data.append([a.replace('\t', ' '), b[0], b[1]])
+        dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Utility', 'Support'])
+
+        return dataFrame
+ + +
+[docs] + def getPatterns(self): + """ + Function to send the set of patterns after completion of the mining process + + :return: returning patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs]
+    def save(self, outFile):
+        """
+        Complete set of patterns will be written to an output file
+
+        :param outFile: name of the output file
+        :type outFile: csv file
+        """
+        self._oFile = outFile
+        with open(self._oFile, 'w+') as writer:
+            for x, y in self._finalPatterns.items():
+                patternsAndSupport = x.strip() + ":" + str(y[0]) + ":" + str(y[1])
+                writer.write("%s \n" % patternsAndSupport)
+ + +
+[docs] + def getMemoryUSS(self): + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self): + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self): + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + return self._endTime-self._startTime
+ + +
+[docs] + def printResults(self): + """ + This function is used to print the results + """ + print("Total number of Spatial High Utility Frequent Patterns:", len(self.getPatterns())) + print("Total Memory in USS:", self.getMemoryUSS()) + print("Total Memory in RSS", self.getMemoryRSS()) + print("Total ExecutionTime in seconds:", self.getRuntime())
+
+ + +
+[docs]
+def main():
+    inputFile = '/home/nakamura/workspace/labwork/PAMI/PAMI/highUtilityGeoreferencedFrequentPattern/basic/mushroom_utility_spmf.txt'
+    neighborFile = '/home/nakamura/workspace/labwork/PAMI/PAMI/highUtilityGeoreferencedFrequentPattern/basic/mushroom_utility_spmf.txt'
+
+    minUtilCount = 10000
+    minSup = 100
+    separator = ' '
+    obj = SHUFIM(iFile=inputFile, nFile=neighborFile, minUtil=minUtilCount, minSup=minSup, sep=separator)  # initialize
+    obj.mine()
+    obj.printResults()
+    print(obj.getPatterns())
+ + + +if __name__ == '__main__': + main() + # _ap = str() + # if len(_ab._sys.argv) == 6 or len(_ab._sys.argv) == 7: + # if len(_ab._sys.argv) == 7: + # _ap = SHUFIM(_ab._sys.argv[1], _ab._sys.argv[3], int(_ab._sys.argv[4]), _ab._sys.argv[5], _ab._sys.argv[6]) + # if len(_ab._sys.argv) == 6: + # _ap = SHUFIM(_ab._sys.argv[1], _ab._sys.argv[3], int(_ab._sys.argv[4]), _ab._sys.argv[5]) + # _ap.startMine() + # _ap.mine() + # print("Total number of Spatial High Utility Frequent Patterns:", len(_ap.getPatterns())) + # _ap.save(_ab._sys.argv[2]) + # print("Total Memory in USS:", _ap.getMemoryUSS()) + # print("Total Memory in RSS", _ap.getMemoryRSS()) + # print("Total ExecutionTime in seconds:", _ap.getRuntime()) + # else: + # print("Error! The number of input parameters do not match the total number of parameters provided") +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/highUtilityPattern/basic/EFIM.html b/sphinx/_build/html/_modules/PAMI/highUtilityPattern/basic/EFIM.html new file mode 100644 index 000000000..a88ceb071 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/highUtilityPattern/basic/EFIM.html @@ -0,0 +1,998 @@ + + + + + + PAMI.highUtilityPattern.basic.EFIM — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for PAMI.highUtilityPattern.basic.EFIM

+# EFIM is one of the fastest algorithms to mine High Utility ItemSets from transactional databases.
+#
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#             from PAMI.highUtilityPattern.basic import EFIM as alg
+#
+#             obj=alg.EFIM("input.txt",35)
+#
+#             obj.mine()
+#
+#             Patterns = obj.getPatterns()
+#
+#             print("Total number of high utility Patterns:", len(Patterns))
+#
+#             obj.save("output")
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+from PAMI.highUtilityPattern.basic import abstract as _ab
+from typing import List, Dict, Tuple, Set, Union, Any, Generator
+from deprecated import deprecated
+
+
+class _Transaction:
+    """
+    A class to store a transaction of a database
+
+    :Attributes:
+
+        items: list
+            A list of items in transaction 
+        utilities: list
+            A list of utilities of items in transaction
+        transactionUtility: int
+            represent total sum of all utilities in the database
+        prefixUtility:
+            prefix Utility values of item
+        offset:
+            an offset pointer, used by projected transactions
+    :Methods:
+
+        projectTransaction(offsetE):
+            A method to create a new projected Transaction from the existing one, covering the suffix that starts just past offsetE
+        getItems():
+            return items in transaction
+        getUtilities():
+            return utilities in transaction
+        getLastPosition():
+            return last position in a transaction
+        removeUnpromisingItems():
+            A method to remove unpromising items (those absent from the promising-items map)
+        insertionSort():
+            A method to sort all items in the transaction
+    """
+    offset = 0
+    prefixUtility = 0
+
+    def __init__(self, items: list, utilities: list, transactionUtility: int) -> None:
+        self.items = items
+        self.utilities = utilities
+        self.transactionUtility = transactionUtility
+
+    def projectTransaction(self, offsetE: int) -> '_Transaction':
+        """
+        A method to create new Transaction from existing transaction starting from offsetE until the end
+        :param offsetE: an offset over the original transaction for projecting the transaction
+        :type offsetE: int
+        :return: a new transaction after projecting the transaction starting from offsetE until the end of the transaction
+        :rtype: _Transaction
+        """
+        new_transaction = _Transaction(self.items, self.utilities, self.transactionUtility)
+        utilityE = self.utilities[offsetE]
+        new_transaction.prefixUtility = self.prefixUtility + utilityE
+        new_transaction.transactionUtility = self.transactionUtility - utilityE
+        for i in range(self.offset, offsetE):
+            new_transaction.transactionUtility -= self.utilities[i]
+        new_transaction.offset = offsetE + 1
+        return new_transaction
+
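+    # A brief numeric illustration (editor's sketch) of projectTransaction,
+    # including a chained projection:
+    #
+    #     t = _Transaction([1, 2, 3], [4, 5, 6], 15)
+    #     p = t.projectTransaction(0)   # project on item 1
+    #     # p.prefixUtility == 4, p.transactionUtility == 11, p.offset == 1
+    #     q = p.projectTransaction(2)   # then project on item 3, skipping item 2
+    #     # q.prefixUtility == 4 + 6 == 10
+    #     # q.transactionUtility == 11 - 6 - 5 == 0    (item 2's utility is dropped)
+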
+    def getItems(self) -> list:
+        """
+        A method to return items in transaction
+        :return: list of items in transaction after projecting the transaction starting from offsetE until the end of the transaction
+        :rtype: list
+        """
+        return self.items
+
+    def getUtilities(self) -> list:
+        """
+        A method to return utilities in transaction
+        :return: list of utilities in transaction
+        :rtype: list
+        """
+        return self.utilities
+
+    def getLastPosition(self) -> int:
+        """
+        A method to return last position in a transaction
+        :return: last position in a transaction after projecting the transaction starting from offsetE until the end of the transaction
+        :rtype: int
+        """
+
+        return len(self.items) - 1
+
+    def removeUnpromisingItems(self, oldNamesToNewNames: dict) -> None:
+        """
+        A method to remove items which are not present in the map passed to the function
+        :param oldNamesToNewNames: A map represent old names to new names
+        :type oldNamesToNewNames: map
+        :return: None
+        """
+        tempItems = []
+        tempUtilities = []
+        for idx, item in enumerate(self.items):
+            if item in oldNamesToNewNames:
+                tempItems.append(oldNamesToNewNames[item])
+                tempUtilities.append(self.utilities[idx])
+            else:
+                self.transactionUtility -= self.utilities[idx]
+        self.items = tempItems
+        self.utilities = tempUtilities
+        self.insertionSort()
+
+    def insertionSort(self) -> None:
+        """
+        A method to sort items in order
+        :return: None
+        """
+        for i in range(1, len(self.items)):
+            key = self.items[i]
+            utilityJ = self.utilities[i]
+            j = i - 1
+            while j >= 0 and key < self.items[j]:
+                self.items[j + 1] = self.items[j]
+                self.utilities[j + 1] = self.utilities[j]
+                j -= 1
+            self.items[j + 1] = key
+            self.utilities[j + 1] = utilityJ
+
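+# A minimal, hypothetical sketch (not part of the original module) illustrating
+# how _Transaction implements pseudo-projection: the items and utilities lists
+# are shared with the original transaction, and only offset, prefixUtility and
+# transactionUtility change.
+def _demoProjectTransaction() -> None:
+    """Illustrative only: project a transaction on its second item."""
+    t = _Transaction([1, 2, 3], [4, 5, 6], 15)
+    p = t.projectTransaction(1)       # project on the item at offsetE = 1
+    assert p.items is t.items         # pseudo-projection: the lists are shared
+    assert p.prefixUtility == 5       # the utility of item 2 moves to the prefix
+    assert p.transactionUtility == 6  # only item 3's utility remains
+    assert p.offset == 2              # the remaining items start after offsetE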
+
+class _Dataset:
+    """
+    A class representing the list of transactions in this dataset
+
+    :Attributes:
+
+        transactions :
+            the list of transactions in this dataset
+        maxItem:
+            the largest item name
+
+    :methods:
+
+        createTransaction(itemsString, utilityString, transactionUtility):
+            Create a Transaction object from the given items, utilities, and transaction utility
+        getMaxItem():
+            returns the largest item name
+        getTransactions():
+            returns the transactions in the database
+    """
+    transactions = []
+    maxItem = 0
+    
+    def __init__(self, datasetPath: Union[str, _ab._pd.DataFrame], sep: str) -> None:
+        self.strToInt = {}
+        self.intToStr = {}
+        self.transactions = []
+        self.maxItem = 0
+        self.cnt = 1
+        self.sep = sep
+        self.createItemsets(datasetPath)
+
+    def createItemsets(self, datasetPath: Union[str, _ab._pd.DataFrame]) -> None:
+        """
+        Storing the complete transactions of the database/input file in a database variable
+        :param datasetPath: the path of the input file, or a pandas DataFrame holding the transactions
+        :type datasetPath: Union[str, pd.DataFrame]
+        :return: None
+        """
+        self.Database = []
+        if isinstance(datasetPath, _ab._pd.DataFrame):
+            utilities, data, transactionUtility = [], [], []
+            if datasetPath.empty:
+                print("its empty..")
+            i = datasetPath.columns.values.tolist()
+            if 'Transactions' in i:
+                data = datasetPath['Transactions'].tolist()
+            if 'Utilities' in i:
+                utilities = datasetPath['Utilities'].tolist()
+            if 'UtilitySum' in i:
+                transactionUtility = datasetPath['UtilitySum'].tolist()
+            for idx in range(len(data)):
+                self.transactions.append(self.createTransaction(data[idx], utilities[idx], transactionUtility[idx]))
+        if isinstance(datasetPath, str):
+            if _ab._validators.url(datasetPath):
+                data = _ab._urlopen(datasetPath)
+                for line in data:
+                    line = line.decode("utf-8")
+                    trans_list = line.strip().split(':')
+                    transactionUtility = int(trans_list[1])
+                    itemsString = trans_list[0].strip().split(self.sep)
+                    itemsString = [x for x in itemsString if x]
+                    utilityString = trans_list[2].strip().split(self.sep)
+                    utilityString = [x for x in utilityString if x]
+                    self.transactions.append(self.createTransaction(itemsString, utilityString, transactionUtility))
+            else:
+                try:
+                    with open(datasetPath, 'r', encoding='utf-8') as f:
+                        for line in f:
+                            trans_list = line.strip().split(':')
+                            transactionUtility = int(trans_list[1])
+                            itemsString = trans_list[0].strip().split(self.sep)
+                            itemsString = [x for x in itemsString if x]
+                            utilityString = trans_list[2].strip().split(self.sep)
+                            utilityString = [x for x in utilityString if x]
+                            self.transactions.append(
+                                self.createTransaction(itemsString, utilityString, transactionUtility))
+
+                except IOError:
+                    print("File Not Found")
+                    quit()
+
+    def createTransaction(self, itemsString: list, utilityString: list, transactionUtility: int) -> '_Transaction':
+        """
+        A method to create a Transaction from the given items, utilities, and transaction utility
+        :param itemsString: list of strings representing the items of the transaction
+        :type itemsString: list
+        :param utilityString: list of strings representing the utility of each item
+        :type utilityString: list
+        :param transactionUtility: integer representing the transaction utility
+        :type transactionUtility: int
+        :return: the Transaction created from the given data
+        :rtype: _Transaction
+        """
+        items = []
+        utilities = []
+        for idx, item in enumerate(itemsString):
+            if self.strToInt.get(item) is None:
+                self.strToInt[item] = self.cnt
+                self.intToStr[self.cnt] = item
+                self.cnt += 1
+            item_int = self.strToInt.get(item)
+            if item_int > self.maxItem:
+                self.maxItem = item_int
+            items.append(item_int)
+            utilities.append(int(utilityString[idx]))
+        return _Transaction(items, utilities, transactionUtility)
+
+    def getMaxItem(self) -> int:
+        """
+        A method to return the name of the largest item
+        :return: the largest item name
+        :rtype: int
+        """
+        return self.maxItem
+
+    def getTransactions(self) -> list:
+        """
+        A method to return the transactions in the database
+        :return: the list of transactions in the database
+        :rtype: list
+        """
+        return self.transactions
+
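+# A minimal, hypothetical sketch (not part of the original module) of the input
+# line format _Dataset expects: "<items>:<transaction utility>:<utilities>",
+# where items and utilities are separated by the chosen separator (tab here).
+def _demoInputLineFormat() -> None:
+    """Illustrative only: split one line the way createItemsets does."""
+    line = "a\tb\tc:15:4\t5\t6"
+    itemsPart, totalUtility, utilityPart = line.strip().split(':')
+    assert [x for x in itemsPart.split('\t') if x] == ['a', 'b', 'c']
+    assert int(totalUtility) == 15
+    assert [int(u) for u in utilityPart.split('\t') if u] == [4, 5, 6]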
+
+
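+# A minimal, hypothetical sketch (not part of the original module) of EFIM's
+# utility-bin idea: the local utility of a single item is accumulated as the
+# sum of the transaction utilities of the transactions containing it, and
+# items whose bin stays below minUtil are discarded up front, as in mine().
+def _demoLocalUtilityBins(dataset: '_Dataset', minUtil: int) -> list:
+    """Illustrative only: compute local-utility bins and filter items."""
+    bins = {}
+    for transaction in dataset.getTransactions():
+        for item in transaction.getItems():
+            bins[item] = bins.get(item, 0) + transaction.transactionUtility
+    # keep promising items, sorted by ascending local utility as EFIM does
+    return sorted([item for item in bins if bins[item] >= minUtil],
+                  key=lambda item: bins[item])
+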
+[docs] +class EFIM(_ab._utilityPatterns): + """ + :Description: EFIM is one of the fastest algorithm to mine High Utility ItemSets from transactional databases. + + :Reference: Zida, S., Fournier-Viger, P., Lin, J.CW. et al. EFIM: a fast and memory efficient algorithm for + high-utility itemset mining. Knowl Inf Syst 51, 595–625 (2017). https://doi.org/10.1007/s10115-016-0986-0 + + :param iFile: str : + Name of the Input file to mine complete set of High Utility patterns + :param oFile: str : + Name of the output file to store complete set of High Utility patterns + :param minUtil: int : + The user given minUtil value. + :param candidateCount: int + Number of candidates specified by user + :param maxMemory: int + Maximum memory used by this program for running + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + + :Attributes: + + iFile : file + Name of the input file to mine complete set of high utility patterns + oFile : file + Name of the output file to store complete set of high utility patterns + memoryRSS : float + To store the total amount of RSS memory consumed by the program + startTime:float + To record the start time of the mining process + endTime:float + To record the completion time of the mining process + minUtil : int + The user given minUtil value + highUtilityitemSets: map + set of high utility itemSets + candidateCount: int + Number of candidates + utilityBinArrayLU: list + A map to hold the local utility values of the items in database + utilityBinArraySU: list + A map to hold the subtree utility values of the items is database + oldNamesToNewNames: list + A map which contains old names, new names of items as key value pairs + newNamesToOldNames: list + A map which contains new names, old names of items as key value pairs + maxMemory: float + Maximum memory used by this program for running + patternCount: int + Number of HUI's + itemsToKeep: list + keep only the promising items ie items having local utility values greater than or equal to minUtil + itemsToExplore: list + list of items that have subtreeUtility value greater than or equal to minUtil + + :Methods : + + mine() + Mining process will start from here + getPatterns() + Complete set of patterns will be retrieved with this function + save(oFile) + Complete set of patterns will be loaded in to a output file + getPatternsAsDataFrame() + Complete set of patterns will be loaded in to a dataframe + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function + backTrackingEFIM(transactionsOfP, itemsToKeep, itemsToExplore, prefixLength) + A method to mine the HUIs Recursively + useUtilityBinArraysToCalculateUpperBounds(transactionsPe, j, itemsToKeep) + A method to calculate the sub-tree utility and local utility of all items that can extend itemSet P and e + output(tempPosition, utility) + A method to output a high-utility itemSet to file or memory depending on what the user chose + is_equal(transaction1, transaction2) + A method to Check if two transaction are identical + useUtilityBinArrayToCalculateSubtreeUtilityFirstTime(dataset) + A method to calculate the sub tree utility values for single items + 
sortDatabase(self, transactions) + A Method to sort transaction + sort_transaction(self, trans1, trans2) + A Method to sort transaction + useUtilityBinArrayToCalculateLocalUtilityFirstTime(self, dataset) + A method to calculate local utility values for single itemsets + + **Executing the code on terminal:** + ------------------------------------------ + + .. code-block:: console + + Format: + + (.venv) $ python3 EFIM.py <inputFile> <outputFile> <minUtil> <sep> + + Example Usage: + + (.venv) $ python3 EFIM sampleTDB.txt output.txt 35 + + .. note:: maxMemory will be considered as Maximum memory used by this program for running + + Sample run of importing the code: + ------------------------------------- + .. code-block:: python + + from PAMI.highUtilityPattern.basic import EFIM as alg + + obj=alg.EFIM("input.txt",35) + + obj.mine() + + Patterns = obj.getPatterns() + + print("Total number of high utility Patterns:", len(Patterns)) + + obj.save("output") + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + **Credits:** + ------------------- + The complete program was written by pradeep pallikila under the supervision of Professor Rage Uday Kiran. + + """ + + _highUtilityitemSets = [] + _candidateCount = 0 + _utilityBinArrayLU = {} + _utilityBinArraySU = {} + _oldNamesToNewNames = {} + _newNamesToOldNames = {} + _strToInt = {} + _intToStr = {} + _Neighbours = {} + _temp = [0] * 5000 + _patternCount = int() + _maxMemory = 0 + _startTime = float() + _endTime = float() + _finalPatterns = {} + _iFile = " " + _nFile = " " + _lno = 0 + _sep = "\t" + _minUtil = 0 + _memoryUSS = float() + _memoryRSS = float() + _startTime = _ab._time.time() + + def __init__(self, iFile, minUtil, sep="\t") -> None: + super().__init__(iFile, minUtil, sep) + self._sep = sep + self._highUtilityitemSets = [] + self._candidateCount = 0 + self._utilityBinArrayLU = {} + self._utilityBinArraySU = {} + self._oldNamesToNewNames = {} + self._newNamesToOldNames = {} + self._strToInt = {} + self._intToStr = {} + self._Neighbours = {} + self._temp = [0] * 5000 + self._patternCount = 0 + self._maxMemory = 0 + self._endTime = float() + self._finalPatterns = {} + self._lno = 0 + self._memoryUSS = float() + self._memoryRSS = float() + +
+[docs] + @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.") + def startMine(self) -> None: + """ + Start the EFIM algorithm. + :return: None + """ + self.mine()
+ + +
+[docs] + def mine(self) -> None: + """ + Start the EFIM algorithm. + :return: None + """ + self._startTime = _ab._time.time() + self._dataset = _Dataset(self._iFile, self._sep) + self._useUtilityBinArrayToCalculateLocalUtilityFirstTime(self._dataset) + self._minUtil = int(self._minUtil) + itemsToKeep = [] + for key in self._utilityBinArrayLU.keys(): + if self._utilityBinArrayLU[key] >= self._minUtil: + itemsToKeep.append(key) + itemsToKeep = sorted(itemsToKeep, key=lambda x: self._utilityBinArrayLU[x]) + currentName = 1 + for idx, item in enumerate(itemsToKeep): + self._oldNamesToNewNames[item] = currentName + self._newNamesToOldNames[currentName] = item + itemsToKeep[idx] = currentName + currentName += 1 + for transaction in self._dataset.getTransactions(): + transaction.removeUnpromisingItems(self._oldNamesToNewNames) + self._sortDatabase(self._dataset.getTransactions()) + emptyTransactionCount = 0 + for transaction in self._dataset.getTransactions(): + if len(transaction.getItems()) == 0: + emptyTransactionCount += 1 + self._dataset.transactions = self._dataset.transactions[emptyTransactionCount:] + self._useUtilityBinArrayToCalculateSubtreeUtilityFirstTime(self._dataset) + itemsToExplore = [] + for item in itemsToKeep: + if self._utilityBinArraySU[item] >= self._minUtil: + itemsToExplore.append(item) + self._backTrackingEFIM(self._dataset.getTransactions(), itemsToKeep, itemsToExplore, 0) + self._endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = float() + self._memoryRSS = float() + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss + print("High Utility patterns were generated successfully using EFIM algorithm")
+ + + def _backTrackingEFIM(self, transactionsOfP: list, itemsToKeep: list, itemsToExplore: list, prefixLength: int) -> None: + """ + A method to mine the HUIs Recursively + :param transactionsOfP: the list of transactions containing the current prefix P + :type transactionsOfP: list + :param itemsToKeep: the list of secondary items in the p-projected database + :type itemsToKeep: list + :param itemsToExplore: the list of primary items in the p-projected database + :type itemsToExplore: list + :param prefixLength: current prefixLength + :type prefixLength: int + :return: None + """ + self._candidateCount += len(itemsToExplore) + for idx, e in enumerate(itemsToExplore): + transactionsPe = [] + utilityPe = 0 + previousTransaction = transactionsOfP[0] + consecutiveMergeCount = 0 + for transaction in transactionsOfP: + items = transaction.getItems() + if e in items: + positionE = items.index(e) + if transaction.getLastPosition() == positionE: + utilityPe += transaction.getUtilities()[positionE] + transaction.prefixUtility + else: + projectedTransaction = transaction.projectTransaction(positionE) + utilityPe += projectedTransaction.prefixUtility + if previousTransaction == transactionsOfP[0]: + previousTransaction = projectedTransaction + elif self._isEqual(projectedTransaction, previousTransaction): + if consecutiveMergeCount == 0: + items = previousTransaction.items[previousTransaction.offset:] + utilities = previousTransaction.utilities[previousTransaction.offset:] + itemsCount = len(items) + positionPrevious = 0 + positionProjection = projectedTransaction.offset + while positionPrevious < itemsCount: + utilities[positionPrevious] += projectedTransaction.utilities[positionProjection] + positionPrevious += 1 + positionProjection += 1 + previousTransaction.prefixUtility += projectedTransaction.prefixUtility + sumUtilities = previousTransaction.prefixUtility + previousTransaction = _Transaction(items, utilities, previousTransaction.transactionUtility + projectedTransaction.transactionUtility) + previousTransaction.prefixUtility = sumUtilities + else: + positionPrevious = 0 + positionProjected = projectedTransaction.offset + itemsCount = len(previousTransaction.items) + while positionPrevious < itemsCount: + previousTransaction.utilities[positionPrevious] += projectedTransaction.utilities[ + positionProjected] + positionPrevious += 1 + positionProjected += 1 + previousTransaction.transactionUtility += projectedTransaction.transactionUtility + previousTransaction.prefixUtility += projectedTransaction.prefixUtility + consecutiveMergeCount += 1 + else: + transactionsPe.append(previousTransaction) + previousTransaction = projectedTransaction + consecutiveMergeCount = 0 + transaction.offset = positionE + if previousTransaction != transactionsOfP[0]: + transactionsPe.append(previousTransaction) + self._temp[prefixLength] = self._newNamesToOldNames[e] + if utilityPe >= self._minUtil: + self._output(prefixLength, utilityPe) + self._useUtilityBinArraysToCalculateUpperBounds(transactionsPe, idx, itemsToKeep) + newItemsToKeep = [] + newItemsToExplore = [] + for l in range(idx + 1, len(itemsToKeep)): + itemK = itemsToKeep[l] + if self._utilityBinArraySU[itemK] >= self._minUtil: + newItemsToExplore.append(itemK) + newItemsToKeep.append(itemK) + elif self._utilityBinArrayLU[itemK] >= self._minUtil: + newItemsToKeep.append(itemK) + if len(transactionsPe) != 0: + self._backTrackingEFIM(transactionsPe, newItemsToKeep, newItemsToExplore, prefixLength + 1) + + def _useUtilityBinArraysToCalculateUpperBounds(self, 
transactionsPe: list, j: int, itemsToKeep: list) -> None: + """ + A method to calculate the subtree utility and local utility of all items that can extend itemSet P U {e} + :param transactionsPe: transactions the projected database for P U {e} + :type transactionsPe: list + :param j:the position of j in the list of promising items + :type j:int + :param itemsToKeep :the list of promising items + :type itemsToKeep: list + :return: None + """ + for i in range(j + 1, len(itemsToKeep)): + item = itemsToKeep[i] + self._utilityBinArrayLU[item] = 0 + self._utilityBinArraySU[item] = 0 + for transaction in transactionsPe: + sumRemainingUtility = 0 + i = len(transaction.getItems()) - 1 + while i >= transaction.offset: + item = transaction.getItems()[i] + if item in itemsToKeep: + sumRemainingUtility += transaction.getUtilities()[i] + self._utilityBinArraySU[item] += sumRemainingUtility + transaction.prefixUtility + self._utilityBinArrayLU[item] += transaction.transactionUtility + transaction.prefixUtility + i -= 1 + + def _output(self, tempPosition: int, utility: int) -> None: + """ + Method to print high utility items + :param tempPosition: position of last item + :type tempPosition : int + :param utility: total utility of itemSet + :type utility: int + :return: None + """ + self._patternCount += 1 + s1 = str() + for i in range(0, tempPosition+1): + s1 += self._dataset.intToStr.get((self._temp[i])) + if i != tempPosition: + s1 += "\t" + self._finalPatterns[s1] = str(utility) + + def _isEqual(self, transaction1: '_Transaction', transaction2: '_Transaction') -> bool: + """ + A method to Check if two transaction are identical + :param transaction1: the first transaction + :type transaction1: Trans + :param transaction2: the second transaction + :type transaction2: Trans + :return : whether both are identical or not + :rtype: bool + """ + length1 = len(transaction1.items) - transaction1.offset + length2 = len(transaction2.items) - transaction2.offset + if length1 != length2: + return False + position1 = transaction1.offset + position2 = transaction2.offset + while position1 < len(transaction1.items): + if transaction1.items[position1] != transaction2.items[position2]: + return False + position1 += 1 + position2 += 1 + return True + + def _useUtilityBinArrayToCalculateSubtreeUtilityFirstTime(self, dataset: '_Dataset') -> None: + """ + Scan the initial database to calculate the subtree utility of each item using a utility-bin array + :param dataset: the transaction database + :type dataset: list + :return: None + """ + for transaction in dataset.getTransactions(): + sumSU = 0 + i = len(transaction.getItems()) - 1 + while i >= 0: + item = transaction.getItems()[i] + sumSU += transaction.getUtilities()[i] + if item in self._utilityBinArraySU.keys(): + self._utilityBinArraySU[item] += sumSU + else: + self._utilityBinArraySU[item] = sumSU + i -= 1 + + def _sortDatabase(self, transactions: list) -> None: + """ + A Method to sort transactions + :param transactions: transaction of items + :type transactions: Transaction + :return: None + """ + cmp_items = _ab._functools.cmp_to_key(self.sort_transaction) + transactions.sort(key=cmp_items) + +
+[docs] + def sort_transaction(self, trans1: '_Transaction', trans2: '_Transaction') -> int: + """ + A Method to sort transaction + :param trans1: the first transaction + :type trans1: Trans + :param trans2:the second transaction + :type trans2: Trans + :return: sorted transaction + :rtype: int + """ + trans1_items = trans1.getItems() + trans2_items = trans2.getItems() + pos1 = len(trans1_items) - 1 + pos2 = len(trans2_items) - 1 + if len(trans1_items) < len(trans2_items): + while pos1 >= 0: + sub = trans2_items[pos2] - trans1_items[pos1] + if sub != 0: + return sub + pos1 -= 1 + pos2 -= 1 + return -1 + elif len(trans1_items) > len(trans2_items): + while pos2 >= 0: + sub = trans2_items[pos2] - trans1_items[pos1] + if sub != 0: + return sub + pos1 -= 1 + pos2 -= 1 + return 1 + else: + while pos2 >= 0: + sub = trans2_items[pos2] - trans1_items[pos1] + if sub != 0: + return sub + pos1 -= 1 + pos2 -= 1 + return 0
+ + + def _useUtilityBinArrayToCalculateLocalUtilityFirstTime(self, dataset: '_Dataset') -> None: + """ + A method to calculate local utility of single itemset + :param dataset: the transaction database + :type dataset: dataset + :return: None + """ + for transaction in dataset.getTransactions(): + for item in transaction.getItems(): + if item in self._utilityBinArrayLU: + self._utilityBinArrayLU[item] += transaction.transactionUtility + else: + self._utilityBinArrayLU[item] = transaction.transactionUtility + +
+[docs] + def getPatternsAsDataFrame(self) -> '_pd.DataFrame': + """ + Storing final patterns in a dataframe + :return: returning patterns in a dataframe + :rtype: pd.DataFrame + """ + dataFrame = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a.replace('\t', ' '), b]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Utility']) + + return dataFrame
+ + +
+[docs] + def getPatterns(self) -> dict: + """ + Function to send the set of patterns after completion of the mining process + :return: returning patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs] + def save(self, outFile: str) -> None: + """ + Complete set of frequent patterns will be loaded in to an output file + :param outFile: name of the output file + :type outFile: csv file + :return: None + """ + self.oFile = outFile + writer = open(self.oFile, 'w+') + for x, y in self._finalPatterns.items(): + patternsAndSupport = x.strip() + ":" + str(y) + writer.write("%s \n" % patternsAndSupport)
+ + +
+[docs] + def getMemoryUSS(self) -> float: + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self) -> float: + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self) -> float: + """ + Calculating the total amount of runtime taken by the mining process + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + return self._endTime-self._startTime
+ + +
+[docs] + def printResults(self) -> None: + """ + This function is used to print the results + """ + print("Total number of High Utility Patterns:", len(self.getPatterns())) + print("Total Memory in USS:", self.getMemoryUSS()) + print("Total Memory in RSS", self.getMemoryRSS()) + print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+
+
+if __name__ == '__main__':
+    _ap = str()
+    if len(_ab._sys.argv) == 4 or len(_ab._sys.argv) == 5:
+        if len(_ab._sys.argv) == 5:  # includes separator
+            _ap = EFIM(_ab._sys.argv[1], int(_ab._sys.argv[3]), _ab._sys.argv[4])
+        if len(_ab._sys.argv) == 4:  # takes "\t" as the separator
+            _ap = EFIM(_ab._sys.argv[1], int(_ab._sys.argv[3]))
+        _ap.mine()
+        print("Total number of High Utility Patterns:", len(_ap.getPatterns()))
+        _ap.save(_ab._sys.argv[2])
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        print("Error! The number of input parameters do not match the total number of parameters provided")
\ No newline at end of file
diff --git a/sphinx/_build/html/_modules/PAMI/highUtilityPattern/basic/HMiner.html b/sphinx/_build/html/_modules/PAMI/highUtilityPattern/basic/HMiner.html
new file mode 100644
index 000000000..b8439df86
--- /dev/null
+++ b/sphinx/_build/html/_modules/PAMI/highUtilityPattern/basic/HMiner.html
@@ -0,0 +1,869 @@

Source code for PAMI.highUtilityPattern.basic.HMiner

+#  High Utility itemSet Mining (HMiner) is an important algorithm to mine high utility itemsets from transactional databases.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#
+#             from PAMI.highUtilityPattern.basic import HMiner as alg
+#
+#             obj = alg.HMiner("input.txt", 35)
+#
+#             obj.mine()
+#
+#             Patterns = obj.getPatterns()
+#
+#             print("Total number of high utility Patterns:", len(Patterns))
+#
+#             obj.save("output")
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+from PAMI.highUtilityPattern.basic import abstract as _ab
+from deprecated import deprecated
+
+
+class _Element:
+    """
+    A class that represents an element of a utility list.
+
+    :Attributes :
+
+        tid : int
+            keeps track of the transaction id
+        nu : int
+            non-closed itemSet utility
+        nru : int
+            non-closed remaining utility
+        pu : int
+            prefix utility
+        ppos: int
+            position of the previous item in the list
+    """
+
+    def __init__(self, tid, nu, nru, pu, ppos):
+        self.tid = tid
+        self.nu = nu
+        self.nru = nru
+        self.pu = pu
+        self.ppos = ppos
+
+
+class _CUList:
+    """
+    A class that represents a compact utility list (CUL)
+
+    :Attributes :
+
+        item: int
+            the item
+        sumnu: int
+            the sum of non-closed item utilities
+        sumnru: int
+            the sum of non-closed remaining utilities
+        sumCu : int
+            the sum of closed utilities
+        sumCru: int
+            the sum of closed remaining utilities
+        sumCpu: int
+            the sum of closed prefix utilities
+        elements: list
+            the list of elements
+    :Methods :
+
+        addElements(element)
+            Method to add an element to this utility list and update the sums at the same time.
+    """
+
+    def __init__(self, item):
+        self.item = item
+        self.sumnu = 0
+        self.sumnru = 0
+        self.sumCu = 0
+        self.sumCru = 0
+        self.sumCpu = 0
+        self.elements = []
+
+    def addElements(self, element):
+        """
+        A method to add a new element to the CUList and update the running sums
+        :param element: the element to be added to the CUList
+        :type element: _Element
+        """
+        self.sumnu += element.nu
+        self.sumnru += element.nru
+        self.elements.append(element)
+
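+# A minimal, hypothetical sketch (not part of the original module) showing how
+# a compact utility list accumulates the non-closed utility (nu) and remaining
+# utility (nru) of its elements as addElements is called.
+def _demoCUList() -> None:
+    """Illustrative only: build a CUList with two elements."""
+    cul = _CUList(1)
+    cul.addElements(_Element(tid=1, nu=4, nru=6, pu=0, ppos=-1))
+    cul.addElements(_Element(tid=2, nu=3, nru=0, pu=0, ppos=-1))
+    assert cul.sumnu == 7 and cul.sumnru == 6 and len(cul.elements) == 2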
+
+class _Pair:
+    """
+    A class representing an item and its utility in a transaction
+    """
+
+    def __init__(self):
+        self.item = 0
+        self.utility = 0
+
+
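+# A minimal, hypothetical sketch (not part of the original module) of the EUCS
+# (Estimated Utility Co-occurrence Structure) kept in mapFMAP: for each ordered
+# pair of items it accumulates the TWU of every transaction containing both,
+# so pairs whose entry stays below minUtil can be pruned without exploration.
+def _demoEUCS() -> None:
+    """Illustrative only: build an EUCS from two toy transactions."""
+    transactions = [(['a', 'b', 'c'], 15), (['a', 'b'], 7)]
+    mapFMAP = {}
+    for items, twu in transactions:
+        for i, a in enumerate(items):
+            inner = mapFMAP.setdefault(a, {})
+            for b in items[i + 1:]:
+                inner[b] = inner.get(b, 0) + twu
+    assert mapFMAP['a']['b'] == 22 and mapFMAP['a']['c'] == 15
+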
+
+[docs] +class HMiner(_ab._utilityPatterns): + """ + :Description: High Utility itemSet Mining (HMIER) is an importent algorithm to miner High utility items from the database. + + :Reference: + + + :param iFile: str : + Name of the Input file to mine complete set of High Utility patterns + :param oFile: str : + Name of the output file to store complete set of High Utility patterns + :param minUtil: int : + The user given minUtil value. + :param minSup: int or float or str : + The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup is expressed in count. Otherwise, it will be treated as float. + :param maxPer: float : + The user can specify maxPer in count or proportion of database size. If the program detects the data type of maxPer is integer, then it treats maxPer is expressed in count. + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + + :Attributes: + + iFile : file + Name of the input file to mine complete set of frequent patterns + oFile : file + Name of the output file to store complete set of frequent patterns + memoryRSS : float + To store the total amount of RSS memory consumed by the program + startTime:float + To record the start time of the mining process + endTime:float + To record the completion time of the mining process + minUtil : int + The user given minUtil + mapFMAP: list + EUCS map of the FHM algorithm + candidates: int + candidates genetated + huiCnt: int + huis created + neighbors: map + keep track of nighboues of elements + + :Methods: + + mine() + Mining process will start from here + getPatterns() + Complete set of patterns will be retrieved with this function + save(oFile) + Complete set of frequent patterns will be loaded in to a output file + getPatternsAsDataFrame() + Complete set of frequent patterns will be loaded in to a dataframe + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function + Explore_SearchTree(prefix, uList, minUtil) + A method to find all high utility itemSets + UpdateCLosed(x, culs, st, excul, newT, ex, ey_ts, length) + A method to update closed values + saveitemSet(prefix, prefixLen, item, utility) + A method to save itemSets + updateElement(z, culs, st, excul, newT, ex, duppos, ey_ts) + A method to updates vales for duplicates + construcCUL(x, culs, st, minUtil, length, exnighbors) + A method to construct CUL's database + + **Executing the code on terminal:** + -------------------------------------------- + + .. code-block:: console + + Format: + + (.venv) $ python3 HMiner.py <inputFile> <outputFile> <minUtil> + + Example Usage: + + (.venv) $ python3 HMiner.py sampleTDB.txt output.txt 35 + + .. note:: minSup will be considered in percentage of database transactions + + + Sample run of importing the code: + -------------------------------------- + .. 
code-block:: python + + from PAMI.highUtilityPattern.basic import HMiner as alg + + obj = alg.HMiner("input.txt",35) + + obj.mine() + + Patterns = obj.getPatterns() + + print("Total number of high utility Patterns:", len(Patterns)) + + obj.save("output") + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + **Credits:** + ----------------------------- + The complete program was written by B.Sai Chitra under the supervision of Professor Rage Uday Kiran. + + """ + + _startTime = float() + _endTime = float() + _minSup = str() + _maxPer = float() + _finalPatterns = {} + _Database = {} + _transactions = [] + _utilities = [] + _utilitySum = [] + _iFile = " " + _oFile = " " + _minUtil = 0 + _sep = "\t" + _memoryUSS = float() + _memoryRSS = float() + + def __init__(self, iFile1, minUtil, sep="\t"): + super().__init__(iFile1, minUtil, sep) + self._huiCount = 0 + self._candidates = 0 + self._mapOfTWU = {} + self._minutil = 0 + self._mapFMAP = {} + self._finalPatterns = {} + + def _HMiner(self, o1, o2) -> int: + """ + A Function that sort all FFI-list in ascending order of Support + + :param o1: First FFI-list + + :type o1: _FFList + + :param o2: Second FFI-list + + :type o1: _FFList + + :return: Comparision Value + + :rtype: int + """ + compare = self._mapOfTWU[o1.item] - self._mapOfTWU[o2.item] + if compare == 0: + return int(o1.item) - int(o2.item) + else: + return compare + + def _creteItemsets(self): + """ + Storing the complete transactions of the database/input file in a database variable + """ + self._transactions, self._utilities, self._utilitySum = [], [], [] + if isinstance(self._iFile, _ab._pd.DataFrame): + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'Transactions' in i: + self._transactions = self._iFile['Transactions'].tolist() + if 'Utilities' in i: + self._utilities = self._iFile['Utilities'].tolist() + if 'UtilitySum' in i: + self._utilitySum = self._iFile['UtilitySum'].tolist() + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + #print("hey") + data = _ab._urlopen(self._iFile) + for line in data: + line = line.decode("utf-8") + line = line.split("\n")[0] + parts = line.split(":") + items = parts[0].split(self._sep) + self._transactions.append([x for x in items if x]) + utilities = parts[2].split(self._sep) + self._utilities.append(utilities) + self._utilitySum.append(int(parts[1])) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line = line.split("\n")[0] + parts = line.split(":") + items = parts[0].split(self._sep) + self._transactions.append([x for x in items if x]) + utilities = parts[2].split(self._sep) + self._utilities.append(utilities) + self._utilitySum.append(int(parts[1])) + except IOError: + print("File Not Found") + quit() + +
+[docs] + @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.") + def startMine(self): + """ + Main program to start the operation + """ + self.mine()
+ + +
+[docs] + def mine(self): + """ + Main program to start the operation + """ + self._startTime = _ab._time.time() + self._creteItemsets() + self._finalPatterns = {} + for line in range(len(self._transactions)): + items_str = self._transactions[line] + utility_str = self._utilities[line] + transUtility = self._utilitySum[line] + for i in range(0, len(items_str)): + item = items_str[i] + twu = self._mapOfTWU.get(item) + if twu == None: + twu = transUtility + else: + twu += transUtility + self._mapOfTWU[item] = twu + listOfCUList = [] + hashTable = {} + mapItemsToCUList = {} + minutil = self._minUtil + for item in self._mapOfTWU.keys(): + if self._mapOfTWU.get(item) >= self._minUtil: + uList = _CUList(item) + mapItemsToCUList[item] = uList + listOfCUList.append(uList) + listOfCUList.sort(key=_ab._functools.cmp_to_key(self._HMiner)) + tid = 1 + for line in range(len(self._transactions)): + items = self._transactions[line] + utilities = self._utilities[line] + ru = 0 + newTwu = 0 + tx_key = [] + revisedTrans = [] + for i in range(0, len(items)): + pair = _Pair() + pair.item = items[i] + pair.utility = int(utilities[i]) + if self._mapOfTWU.get(pair.item) >= self._minUtil: + revisedTrans.append(pair) + tx_key.append(pair.item) + newTwu += pair.utility + revisedTrans.sort(key=_ab._functools.cmp_to_key(self._HMiner)) + tx_key1 = tuple(tx_key) + if len(revisedTrans) > 0: + if tx_key1 not in hashTable.keys(): + hashTable[tx_key1] = len(mapItemsToCUList[revisedTrans[len(revisedTrans) - 1].item].elements) + for i in range(len(revisedTrans) - 1, -1, -1): + pair = revisedTrans[i] + cuListoFItems = mapItemsToCUList.get(pair.item) + element = _Element(tid, pair.utility, ru, 0, 0) + if i > 0: + element.ppos = len(mapItemsToCUList[revisedTrans[i - 1].item].elements) + else: + element.ppos = - 1 + cuListoFItems.addElements(element) + ru += pair.utility + else: + pos = hashTable[tx_key1] + ru = 0 + for i in range(len(revisedTrans) - 1, -1, -1): + cuListoFItems = mapItemsToCUList[revisedTrans[i].item] + cuListoFItems.elements[pos].nu += revisedTrans[i].utility + cuListoFItems.elements[pos].nru += ru + cuListoFItems.sumnu += revisedTrans[i].utility + cuListoFItems.sumnru += ru + ru += revisedTrans[i].utility + pos = cuListoFItems.elements[pos].ppos + # EUCS + for i in range(len(revisedTrans) - 1, -1, -1): + pair = revisedTrans[i] + mapFMAPItem = self._mapFMAP.get(pair.item) + if mapFMAPItem == None: + mapFMAPItem = {} + self._mapFMAP[pair.item] = mapFMAPItem + for j in range(i + 1, len(revisedTrans)): + pairAfter = revisedTrans[j] + twuSUm = mapFMAPItem.get(pairAfter.item) + if twuSUm is None: + mapFMAPItem[pairAfter.item] = newTwu + else: + mapFMAPItem[pairAfter.item] = twuSUm + newTwu + tid += 1 + self._ExploreSearchTree([], listOfCUList, minutil) + self._endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryRSS = float() + self._memoryUSS = float() + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss + print("High Utility patterns were generated successfully using HMiner algorithm")
+ + + def _ExploreSearchTree(self, prefix, uList, minutil): + """ + A method to find all high utility itemSets + :parm prefix:it represents all items in prefix + :type prefix:list + :parm uList:projected Utility list + :type uList: lists + :parm minutil:user minUtil + :type minutil:int + """ + for i in range(0, len(uList)): + x = uList[i] + soted_prefix = [0] * (len(prefix) + 1) + soted_prefix = prefix[0:len(prefix) + 1] + soted_prefix.append(x.item) + if x.sumnu + x.sumCu >= minutil: + self._saveitemSet(prefix, len(prefix), x.item, x.sumnu + x.sumCu) + self._candidates += 1 + if x.sumnu + x.sumCu + x.sumnru + x.sumCru >= minutil: + exULs = self._construcCUL(x, uList, i, minutil, len(soted_prefix)) + self._ExploreSearchTree(soted_prefix, exULs, minutil) + + def _construcCUL(self, x, culs, st, minutil, length): + """ + A method to construct CUL's database + :parm x: Compact utility list + :type x: Node + :parm culs:list of Compact utility list + :type culs:lists + :parm st: starting pos of culs + :type st:int + :parm minutil: user minUtil + :type minutil:int + :parm length: length of x + :type length:int + :return: projectd database of list X + :rtype: list + """ + excul = [] + lau = [] + cutil = [] + ey_tid = [] + for i in range(0, len(culs)): + uList = _CUList(culs[i].item) + excul.append(uList) + lau.append(0) + cutil.append(0) + ey_tid.append(0) + sz = len(culs) - (st + 1) + exSZ = sz + for j in range(st + 1, len(culs)): + mapOfTWUF = self._mapFMAP[x.item] + if mapOfTWUF != None: + twuf = mapOfTWUF.get(culs[j].item) + if twuf != None and twuf < minutil: + excul[j] = None + exSZ = sz - 1 + else: + uList = _CUList(culs[j].item) + excul[j] = uList + ey_tid[j] = 0 + lau[j] = x.sumCu + x.sumCru + x.sumnu + x.sumnru + cutil[j] = x.sumCu + x.sumCru + hashTable = {} + for ex in x.elements: + newT = [] + for j in range(st + 1, len(culs)): + if excul[j] is None: + continue + eylist = culs[j].elements + while ey_tid[j] < len(eylist) and eylist[ey_tid[j]].tid < ex.tid: + ey_tid[j] = ey_tid[j] + 1 + if ey_tid[j] < len(eylist) and eylist[ey_tid[j]].tid == ex.tid: + newT.append(j) + else: + lau[j] = lau[j] - ex.nu - ex.nru + if lau[j] < minutil: + excul[j] = None + exSZ = exSZ - 1 + if len(newT) == exSZ: + self._UpdateCLosed(x, culs, st, excul, newT, ex, ey_tid, length) + else: + if len(newT) == 0: + continue + ru = 0 + newT1 = tuple(newT) + if newT1 not in hashTable.keys(): + hashTable[newT1] = len(excul[newT[len(newT) - 1]].elements) + for i in range(len(newT) - 1, -1, -1): + cuListoFItems = excul[newT[i]] + y = culs[newT[i]].elements[ey_tid[newT[i]]] + element = _Element(ex.tid, ex.nu + y.nu - ex.pu, ru, ex.nu, 0) + if i > 0: + element.ppos = len(excul[newT[i - 1]].elements) + else: + element.ppos = - 1 + cuListoFItems.addElements(element) + ru += y.nu - ex.pu + else: + dppos = hashTable[newT1] + self._updateElement(x, culs, st, excul, newT, ex, dppos, ey_tid) + for j in range(st + 1, len(culs)): + cutil[j] = cutil[j] + ex.nu + ex.nru + filter_culs = [] + for j in range(st + 1, len(culs)): + if cutil[j] < minutil or excul[j] is None: + continue + else: + if length > 1: + excul[j].sumCu += culs[j].sumCu + x.sumCu - x.sumCpu + excul[j].sumCru += culs[j].sumCru + excul[j].sumCpu += x.sumCu + filter_culs.append(excul[j]) + return filter_culs + + def _UpdateCLosed(self, x, culs, st, excul, newT, ex, ey_tid, length): + """ + A method to update closed values + :parm x: Compact utility list + :type x: lists + :parm culs:list of Compact utility list + :type culs:lists + :parm st: starting pos of culs + 
:type st:int + :parm excul: list of culs + :type excul: list + :parm newT:transaction to be updated + :type newT:list + :parm ex: element ex + :type ex:element + :parm ey_tid:list of tss + :type ey_tid:ts + :parm length: length of x + :type length:int + """ + nru = 0 + for j in range(len(newT) - 1, -1, -1): + ey = culs[newT[j]] + eyy = ey.elements[ey_tid[newT[j]]] + excul[newT[j]].sumCu += ex.nu + eyy.nu - ex.pu + excul[newT[j]].sumCru += nru + excul[newT[j]].sumCpu += ex.nu + nru = nru + eyy.nu - ex.pu + + def _updateElement(self, z, culs, st, excul, newT, ex, duppos, ey_tid): + """ + A method to updates vales for duplicates + + :Attributes: + + :parm z: Compact utility list + :type z: lists + :parm culs:list of Compact utility list + :type culs:lists + :parm st: starting pos of culs + :type st:int + :parm excul:list of culs + :type excul:list + :parm newT:transaction to be updated + :type newT:list + :parm ex: element ex + :type ex:element + :parm duppos: position of z in excul + :type duppos:int + :parm ey_tid:list of tss + :type ey_tid:ts + """ + nru = 0 + pos = duppos + for j in range(len(newT) - 1, -1, -1): + ey = culs[newT[j]] + eyy = ey.elements[ey_tid[newT[j]]] + excul[newT[j]].elements[pos].nu += ex.nu + eyy.nu - ex.pu + excul[newT[j]].sumnu += ex.nu + eyy.nu - ex.pu + excul[newT[j]].elements[pos].nru += nru + excul[newT[j]].sumnru += nru + excul[newT[j]].elements[pos].pu += ex.nu + nru = nru + eyy.nu - ex.pu + pos = excul[newT[j]].elements[pos].ppos + + def _saveitemSet(self, prefix, prefixLen, item, utility): + """ + A method to save itemSets + :parm prefix: it represents all items in prefix + :type prefix :list + :parm prefixLen: length of prefix + :type prefixLen:int + :parm item:item + :type item: int + :parm utility:utility of itemSet + :type utility:int + """ + self._huiCount += 1 + res = str() + for i in range(0, prefixLen): + res += str(prefix[i]) + "\t" + res += str(item) + self._finalPatterns[str(res)] = str(utility) + +
+[docs] + def getPatternsAsDataFrame(self): + """ + Storing final frequent patterns in a dataframe + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataFrame = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a.replace('\t', ' '), b]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Utility']) + return dataFrame
+ + +
+[docs] + def getPatterns(self): + """ + Function to send the set of frequent patterns after completion of the mining process + :return: returning frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs] + def save(self, outFile): + """ + Complete set of frequent patterns will be loaded in to an output file + :param outFile: name of the output file + :type outFile: csv file + """ + self.oFile = outFile + writer = open(self.oFile, 'w+') + for x, y in self._finalPatterns.items(): + patternsAndSupport = x.strip() + ":" + str(y) + writer.write("%s\n" % patternsAndSupport)
+ + +
+[docs] + def getMemoryUSS(self): + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self): + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self): + """ + Calculating the total amount of runtime taken by the mining process + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + return self._endTime - self._startTime
+ + +
+[docs] + def printResults(self): + """ + This function is used to print the results + """ + print("Total number of High Utility Patterns:", len(self.getPatterns())) + print("Total Memory in USS:", self.getMemoryUSS()) + print("Total Memory in RSS", self.getMemoryRSS()) + print("Total ExecutionTime in seconds:", self.getRuntime())
+
+ + + +if __name__ == "__main__": + _ap = str() + if len(_ab._sys.argv) == 4 or len(_ab._sys.argv) == 5: + if len(_ab._sys.argv) == 5: # includes separator + _ap = HMiner(_ab._sys.argv[1], int(_ab._sys.argv[3]), _ab._sys.argv[4]) + if len(_ab._sys.argv) == 4: # to consider "\t" as a separator + _ap = HMiner(_ab._sys.argv[1], int(_ab._sys.argv[3])) + _ap.startMine() + _ap.mine() + print("Total number of huis:", len(_ap.getPatterns())) + _ap.save(_ab._sys.argv[2]) + print("Total Memory in USS:", _ap.getMemoryUSS()) + print("Total Memory in RSS", _ap.getMemoryRSS()) + print("Total ExecutionTime in ms:", _ap.getRuntime()) + else: + print("Error! The number of input parameters do not match the total number of parameters provided") + +
\ No newline at end of file
diff --git a/sphinx/_build/html/_modules/PAMI/highUtilityPattern/basic/UPGrowth.html b/sphinx/_build/html/_modules/PAMI/highUtilityPattern/basic/UPGrowth.html
new file mode 100644
index 000000000..2e3839489
--- /dev/null
+++ b/sphinx/_build/html/_modules/PAMI/highUtilityPattern/basic/UPGrowth.html
@@ -0,0 +1,914 @@

Source code for PAMI.highUtilityPattern.basic.UPGrowth

+# UP-Growth is a two-phase algorithm to mine High Utility Itemsets from transactional databases.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#
+#             from PAMI.highUtilityPattern.basic import UPGrowth as alg
+#
+#             obj=alg.UPGrowth("input.txt",35)
+#
+#             obj.mine()
+#
+#             highUtilityPattern = obj.getPatterns()
+#
+#             print("Total number of Spatial Frequent Patterns:", len(highUtilityPattern))
+#
+#             obj.save("output")
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+from PAMI.highUtilityPattern.basic import abstract as _ab
+from typing import List, Dict, Tuple, Set, Union, Any, Generator
+from deprecated import deprecated
+
+
+class _UPItem:
+    """
+    A class to represent the UPItem
+
+    :Attributes:
+
+        name: int
+            name of item
+        utility: int
+            utility of item
+
+    :Methods:
+
+        getUtility()
+            method to get the item's utility
+        setUtility()
+            method to set the item's utility
+        getName()
+            method to get the name of the item
+    """
+    name = 0
+    utility = 0
+
+    def __init__(self, name: int, utility: int):
+        self.name = name
+        self.utility = utility
+
+    def getUtility(self) -> int:
+        """
+        method to get the utility of this item
+        :return: the utility value of the item
+        :rtype: int
+        """
+        return self.utility
+
+    def setUtility(self, utility: int) -> None:
+        """
+        method to set the utility of this item
+        :param utility: the utility to set
+        :type utility: int
+        :return: None
+        """
+        self.utility = utility
+
+    def getName(self) -> int:
+        """
+        method to get the name of this item
+        :return: the name of the item
+        :rtype: int
+        """
+        return self.name
+
+
+class _UPNode:
+    """
+    A class that represents a UPNode
+
+    :Attributes:
+
+        itemId :int
+            name of the item
+        count: int
+            represents the support count of the node
+        nodeUtility: int
+            represents the utility of the current node
+        nodeLink: UPNode
+            link to the next node with the same itemId (for the header table)
+        childs: list
+            the list of child nodes of this node
+    :Methods:
+
+        getChildWithId(name):
+            Return the immediate child of this node having a given name
+    """
+    itemId = -1
+    count = 1
+    nodeUtility = 0
+    childs = []
+    nodeLink = -1
+    parent = -1
+
+    def __init__(self) -> None:
+        self.itemId = -1
+        self.count = 1
+        self.nodeUtility = 0
+        self.childs = []
+        self.nodeLink = -1
+        self.parent = -1
+
+    def getChildWithId(self, name: int) -> Union['_UPNode', int]:
+        """
+        Return the immediate child of this node having a given name, or -1 if none exists
+        :param name: the itemId to look for
+        :type name: int
+        :return: the child node with the given itemId, or -1 if there is no such child
+        :rtype: _UPNode or int
+        """
+        for child in self.childs:
+            if child.itemId == name:
+                return child
+        return -1
+
+
+class _UPTree:
+    """
+    A class to represent UPTree
+
+    :Attributes:
+
+        headerList: list
+            list of items in the header table
+        mapItemNodes: map
+            maps each item to the first node of its node-link list
+        root : UPNode
+            root of the tree
+        mapItemToLastNode: map
+            maps each item to the last node inserted for it, used to maintain the node links
+        hasMoreThanOnePath :bool
+            variable that indicates whether the tree has more than one path
+
+    :Methods:
+
+        addTransaction(transaction,rtu)
+            To add a transaction (for initial construction)
+        addLocalTransaction(localPath, pathUtility, mapItemToMinimumItemUtility, pathCount)
+            Add a transaction to the UP-Tree (for a local UP-Tree)
+        insertNewNode(currentLocalNode, itemName, nodeUtility)
+            Insert a new node in the UP-Tree as child of a parent node
+        createHeaderList(mapItemToTwu)
+            Method for creating the list of items in the header table, in descending order of TWU or path utility.
+    """
+    headerList = []
+    hasMoreThanOnePath = False
+    mapItemNodes = {}
+    root = _UPNode()
+    mapItemToLastNode = {}
+
+    def __init__(self) -> None:
+        self.headerList = []
+        self.hasMoreThanOnePath = False
+        self.mapItemToLastNode = {}
+        self.mapItemNodes = {}
+        self.root = _UPNode()
+
+    def addTransaction(self, transaction: list, RTU: int) -> int:
+        """
+        A Method to add new Transaction to tree
+        :param transaction: the reorganised transaction
+        :type transaction: list
+        :param RTU: the reorganised transaction utility
+        :type RTU: int
+        :return: the number of new nodes created while inserting the transaction
+        :rtype: int
+        """
+        currentNode = self.root
+        NumberOfNodes = 0
+        RemainingUtility = 0
+        for idx, item in enumerate(transaction):
+            for k in range(idx + 1, len(transaction)):
+                RemainingUtility += transaction[k].getUtility()
+            itemName = item.name
+            child = currentNode.getChildWithId(itemName)
+            if child == -1:
+                NumberOfNodes += 1
+                nodeUtility = RTU - RemainingUtility
+                RemainingUtility = 0
+                currentNode = self.insertNewNode(currentNode, itemName, nodeUtility)
+            else:
+                currentNU = child.nodeUtility
+                nodeUtility = currentNU + (RTU - RemainingUtility)
+                RemainingUtility = 0
+                child.count += 1
+                child.nodeUtility = nodeUtility
+                currentNode = child
+        return NumberOfNodes
+
+    def addLocalTransaction(self, localPath: list, pathUtility: int, mapItemToMinimumItemutility: dict, pathCount: int) -> int:
+        """
+        A Method to add addLocalTransaction to tree
+        :param localPath: The path to insert
+        :type localPath: list
+        :param pathUtility: the Utility of path
+        :type pathUtility: int
+        :param mapItemToMinimumItemutility: the map storing minimum item utility
+        :type mapItemToMinimumItemutility: map
+        :param pathCount: the Path count
+        :type pathCount: int
+        :return: the number of new nodes created while inserting the path
+        :rtype: int
+        """
+        currentLocalNode = self.root
+        RemainingUtility = 0
+        NumberOfNodes = 0
+        for idx, item in enumerate(localPath):
+            for k in range(idx + 1, len(localPath)):
+                search = localPath[k]
+                RemainingUtility += mapItemToMinimumItemutility[search] * pathCount
+            child = currentLocalNode.getChildWithId(item)
+            if child == -1:
+                NumberOfNodes += 1
+                nodeUtility = pathUtility - RemainingUtility
+                RemainingUtility = 0
+                currentLocalNode = self.insertNewNode(currentLocalNode, item, nodeUtility)
+            else:
+                currentNU = child.nodeUtility
+                nodeUtility = currentNU + (pathUtility - RemainingUtility)
+                RemainingUtility = 0
+                child.count += 1
+                child.nodeUtility = nodeUtility
+                currentLocalNode = child
+        return NumberOfNodes
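+
+    # A minimal worked example (illustrative only; items x and y are
+    # hypothetical, not from the original source): for a local path [x, y]
+    # with pathUtility = 20, pathCount = 2 and minimum item utilities
+    # {x: 3, y: 4}, each node's utility discounts the items that follow it
+    # by their minimum utility times the path count:
+    #
+    #   node x: 20 - (4 * 2) = 12
+    #   node y: 20 - 0       = 20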
+
+    def insertNewNode(self, currentlocalNode: _UPNode, itemName: int, nodeUtility: int) -> _UPNode:
+        """
+        A method to Insert a new node in the UP-Tree as child of a parent node
+        :param currentlocalNode: The parent Node
+        :type currentlocalNode: UPNode
+        :param itemName: name of item in new Node
+        :type itemName: int
+        :param nodeUtility: Utility of new node
+        :type nodeUtility: int
+        :return: The newly created UPNode
+        :rtype: _UPNode
+        """
+        newNode = _UPNode()
+        newNode.itemId = itemName
+        newNode.count = 1
+        newNode.nodeUtility = nodeUtility
+        newNode.parent = currentlocalNode
+        currentlocalNode.childs.append(newNode)
+        if not self.hasMoreThanOnePath and len(currentlocalNode.childs) > 1:
+            self.hasMoreThanOnePath = True
+        if itemName in self.mapItemNodes:
+            lastNode = self.mapItemToLastNode[itemName]
+            lastNode.nodeLink = newNode
+            self.mapItemToLastNode[itemName] = newNode
+        else:
+            self.mapItemNodes[itemName] = newNode
+            self.mapItemToLastNode[itemName] = newNode
+        return newNode
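+
+    # Illustrative note (not part of the original source): all nodes holding
+    # the same item are chained through `nodeLink`, so every occurrence of an
+    # item can be visited starting from the header table. A sketch of the
+    # traversal used elsewhere in this module:
+    #
+    #   node = tree.mapItemNodes[item]
+    #   while node != -1:
+    #       ...  # process node
+    #       node = node.nodeLink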
+
+    def createHeaderList(self, mapItemToTwu: dict) -> None:
+        """
+        A method for creating the list of items in the header table, in descending order of TWU or path utility.
+        :param mapItemToTwu: the TWU (or path utility) of each item
+        :type mapItemToTwu: dict
+        :return: None
+        """
+        self.headerList = sorted(self.mapItemNodes.keys(), key=lambda x: mapItemToTwu[x], reverse=True)
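+
+    # A minimal worked example (illustrative values, not from the original
+    # source): if the tree contains items {1, 2, 3} and
+    # mapItemToTwu = {1: 50, 2: 80, 3: 20}, the resulting headerList is
+    # [2, 1, 3], i.e. the items in descending order of TWU.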
+
+
+
+[docs] +class UPGrowth(_ab._utilityPatterns): + """ + :Description: UP-Growth is two-phase algorithm to mine High Utility Itemsets from transactional databases. + + :Reference: Vincent S. Tseng, Cheng-Wei Wu, Bai-En Shie, and Philip S. Yu. 2010. UP-Growth: an efficient algorithm for high utility itemset mining. + In Proceedings of the 16th ACM SIGKDD international conference on Knowledge discovery and data mining (KDD '10). + Association for Computing Machinery, New York, NY, USA, 253–262. DOI:https://doi.org/10.1145/1835804.1835839 + + :param iFile: str : + Name of the Input file to mine complete set of High Utility patterns + :param oFile: str : + Name of the output file to store complete set of High Utility patterns + :param minUtil: int : + The user given minUtil value. + :param candidateCount: int + Number of candidates specified by user + :param maxMemory: int + Maximum memory used by this program for running + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + + :Attributes: + + iFile : file + Name of the input file to mine complete set of frequent patterns + oFile : file + Name of the output file to store complete set of frequent patterns + memoryRSS : float + To store the total amount of RSS memory consumed by the program + startTime:float + To record the start time of the mining process + endTime:float + To record the completion time of the mining process + minUtil : int + The user given minUtil + NumberOfNodes : int + Total number of nodes generated while building the tree + ParentNumberOfNodes : int + Total number of nodes required to build the parent tree + MapItemToMinimumUtility : map + A map to store the minimum utility of item in the database + phuis : list + A list to store the phuis + MapItemToTwu : map + A map to store the twu of each item in database + + :Methods: + + mine() + Mining process will start from here + getPatterns() + Complete set of patterns will be retrieved with this function + createLocalTree(tree, item) + A Method to Construct conditional pattern base + UPGrowth( tree, alpha) + A Method to Mine UP Tree recursively + PrintStats() + A Method to print number of phuis + save(oFile) + Complete set of frequent patterns will be loaded in to an output file + getPatternsAsDataFrame() + Complete set of frequent patterns will be loaded in to a dataframe + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function + + **Executing the code on terminal:** + --------------------------------------------- + + .. code-block:: console + + Format: + + (.venv) $ python3 UPGrowth <inputFile> <outputFile> <Neighbours> <minUtil> <sep> + + Example Usage: + + (.venv) $ python3 UPGrowth sampleTDB.txt output.txt sampleN.txt 35 + + .. note:: maxMemory will be considered as Maximum memory used by this program for running + + + Sample run of importing the code: + ------------------------------------- + .. 
code-block:: python + + from PAMI.highUtilityPattern.basic import UPGrowth as alg + + obj=alg.UPGrowth("input.txt",35) + + obj.mine() + + highUtilityPattern = obj.getPatterns() + + print("Total number of Spatial Frequent Patterns:", len(highUtilityPattern)) + + obj.save("output") + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + **Credits:** + ----------------------- + The complete program was written by Pradeep pallikila under the supervision of Professor Rage Uday Kiran. + + """ + + _maxMemory = 0 + _startTime = 0 + _endTime = 0 + _minUtil = 0 + _memoryUSS = float() + _memoryRSS = float() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _NumberOfNodes = 0 + _ParentNumberOfNodes = 0 + _MapItemToMinimumUtility = {} + _MapItemsetsToUtilities = _ab._defaultdict(int) + _phuis = [] + _Database = [] + _MapItemToTwu = {} + _sep = " " + + def __init__(self, iFile: str, minUtil: int, sep: str='\t') -> None: + super().__init__(iFile, minUtil, sep) + + def _creatingItemSets(self) -> None: + """ + Storing the complete transactions of the database/input file in a database variable + :return: None + """ + self._Database = [] + if isinstance(self._iFile, _ab._pd.DataFrame): + timeStamp, data = [], [] + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'Transactions' in i: + data = self._iFile['Transactions'].tolist() + if 'Utilities' in i: + data = self._iFile['Utilities'].tolist() + if 'UtilitySum' in i: + data = self._iFile['UtilitySum'].tolist() + for i in range(len(data)): + tr = [timeStamp[i]] + tr.append(data[i]) + self._Database.append(tr) + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line = line.decode("utf-8") + self._Database.append(line) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + self._Database.append(line) + except IOError: + print("File Not Found") + quit() + +
+[docs] + @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.") + def startMine(self) -> None: + """ + Mining process will start from here + :return: None + """ + self.mine()
+ + +
+[docs] + def mine(self) -> None: + """ + Mining process will start from here + :return: None + """ + self._startTime = _ab._time.time() + tree = _UPTree() + self._creatingItemSets() + self._finalPatterns = {} + for line in self._Database: + line = line.split("\n")[0] + transaction = line.strip().split(':') + items = transaction[0].split(self._sep) + transactionUtility = int(transaction[1]) + for item in items: + Item = int(item) + if Item in self._MapItemToTwu: + self._MapItemToTwu[Item] += transactionUtility + else: + self._MapItemToTwu[Item] = transactionUtility + for line in self._Database: + line = line.split("\n")[0] + transaction = line.strip().split(':') + items = transaction[0].split(self._sep) + utilities = transaction[2].split(self._sep) + remainingUtility = 0 + revisedTransaction = [] + for idx, item in enumerate(items): + Item = int(item) + utility = int(utilities[idx]) + if self._MapItemToTwu[Item] >= self._minUtil: + element = _UPItem(Item, utility) + revisedTransaction.append(element) + remainingUtility += utility + if Item in self._MapItemToMinimumUtility: + minItemUtil = self._MapItemToMinimumUtility[Item] + if minItemUtil >= utility: + self._MapItemToMinimumUtility[Item] = utility + else: + self._MapItemToMinimumUtility[Item] = utility + revisedTransaction = sorted(revisedTransaction, key=lambda x: self._MapItemToTwu[x.name], reverse=True) + self._ParentNumberOfNodes += tree.addTransaction(revisedTransaction, remainingUtility) + tree.createHeaderList(self._MapItemToTwu) + alpha = [] + self._finalPatterns = {} + # print("number of nodes in parent tree", self.ParentNumberOfNodes) + self._UPGrowth(tree, alpha) + # self.phuis = sorted(self.phuis, key=lambda x: len(x)) + # print(self.phuis[0:10]) + for line in self._Database: + line = line.split("\n")[0] + transaction = line.strip().split(':') + items = transaction[0].split(self._sep) + utilities = transaction[2].split(self._sep) + mapItemToUtility = {} + for idx, item in enumerate(items): + Item = int(item) + utility = int(utilities[idx]) + if self._MapItemToTwu[Item] >= self._minUtil: + mapItemToUtility[Item] = utility + for itemset in self._phuis: + l = len(itemset) + count = 0 + utility = 0 + for item in itemset: + item = int(item) + if item in mapItemToUtility: + utility += mapItemToUtility[item] + count += 1 + if count == l: + self._MapItemsetsToUtilities[tuple(itemset)] += utility + + for itemset in self._phuis: + util = self._MapItemsetsToUtilities[tuple(itemset)] + if util >= self._minUtil: + s = str() + for item in itemset: + s = s + str(item) + s = s + "\t" + self._finalPatterns[s] = util + self._endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = float() + self._memoryRSS = float() + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss + print("High Utility patterns were generated successfully using UPGrowth algorithm")
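+    # Illustrative note (a sketch of the expected input, not part of the
+    # original source): mine() parses each database line as three
+    # ':'-separated fields -- items, transaction utility, item utilities --
+    # e.g. with the default tab separator:
+    #
+    #   1\t2\t3:10:5\t3\t2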
+ + + def _UPGrowth(self, tree: _UPTree, alpha: list) -> None: + """ + A Method to Mine UP Tree recursively + :param tree: UPTree to mine + :type tree: UPTree + :param alpha: prefix itemset + :type alpha: list + :return: None + """ + for item in reversed(tree.headerList): + localTree = self._createLocalTree(tree, item) + node = tree.mapItemNodes[item] + ItemTotalUtility = 0 + while node != -1: + ItemTotalUtility += node.nodeUtility + node = node.nodeLink + if ItemTotalUtility >= self._minUtil: + beta = alpha + [item] + self._phuis.append(beta) + # str1 = ' '.join(map(str, beta)) + # self.finalPatterns[str1] = ItemTotalUtility + if len(localTree.headerList) > 0: + self._UPGrowth(localTree, beta) + + def _createLocalTree(self, tree: _UPTree, item: int) -> _UPTree: + """ + A Method to Construct conditional pattern base + :param tree: the UPtree + :type tree: UP Tree + :param item: item that need to construct conditional patterns + :type item: int + :return: the conditional pattern based UPTree + :rtype: _UPTree + """ + prefixPaths = [] + path = tree.mapItemNodes[item] + itemPathUtility = {} + while path != -1: + nodeUtility = path.nodeUtility + if path.parent != -1: + prefixPath = [] + prefixPath.append(path) + ParentNode = path.parent + while ParentNode.itemId != -1: + prefixPath.append(ParentNode) + itemName = ParentNode.itemId + if itemName in itemPathUtility: + itemPathUtility[itemName] += nodeUtility + else: + itemPathUtility[itemName] = nodeUtility + ParentNode = ParentNode.parent + prefixPaths.append(prefixPath) + path = path.nodeLink + localTree = _UPTree() + for prefixPath in prefixPaths: + pathUtility = prefixPath[0].nodeUtility + pathCount = prefixPath[0].count + localPath = [] + for i in range(1, len(prefixPath)): + node = prefixPath[i] + if itemPathUtility[node.itemId] >= self._minUtil: + localPath.append(node.itemId) + else: + pathUtility -= pathCount * self._MapItemToMinimumUtility[node.itemId] + localPath = sorted(localPath, key=lambda x: itemPathUtility[x], reverse=True) + self._NumberOfNodes += localTree.addLocalTransaction(localPath, pathUtility, self._MapItemToMinimumUtility, + pathCount) + localTree.createHeaderList(itemPathUtility) + return localTree + +
+[docs]
+    def PrintStats(self) -> None:
+        """
+        A method to print the number of PHUIs generated
+        :return: None
+        """
+        print('number of PHUIS are ' + str(len(self._phuis)))
+ + +
+[docs]
+    def getPatternsAsDataFrame(self) -> _ab._pd.DataFrame:
+        """
+        Storing final frequent patterns in a dataframe
+        :return: returning frequent patterns in a dataframe
+        :rtype: pd.DataFrame
+        """
+        data = []
+        for a, b in self._finalPatterns.items():
+            data.append([a.replace('\t', ' '), b])
+        dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Utility'])
+        return dataFrame
+ + +
+[docs]
+    def getPatterns(self) -> dict:
+        """
+        Function to send the set of frequent patterns after completion of the mining process
+        :return: returning frequent patterns
+        :rtype: dict
+        """
+        return self._finalPatterns
+ + +
+[docs]
+    def save(self, outFile: str) -> None:
+        """
+        Complete set of frequent patterns will be written to an output file
+        :param outFile: name of the output file
+        :type outFile: csv file
+        :return: None
+        """
+        self.oFile = outFile
+        with open(self.oFile, 'w+') as writer:
+            for x, y in self._finalPatterns.items():
+                patternsAndSupport = x.strip() + " : " + str(y)
+                writer.write("%s\n" % patternsAndSupport)
+ + +
+[docs]
+    def getMemoryUSS(self) -> float:
+        """
+        Total amount of USS memory consumed by the mining process will be retrieved from this function
+        :return: returning USS memory consumed by the mining process
+        :rtype: float
+        """
+        return self._memoryUSS
+ + +
+[docs]
+    def getMemoryRSS(self) -> float:
+        """
+        Total amount of RSS memory consumed by the mining process will be retrieved from this function
+        :return: returning RSS memory consumed by the mining process
+        :rtype: float
+        """
+        return self._memoryRSS
+ + +
+[docs]
+    def getRuntime(self) -> float:
+        """
+        Calculating the total amount of runtime taken by the mining process
+        :return: returning total amount of runtime taken by the mining process
+        :rtype: float
+        """
+        return self._endTime - self._startTime
+ + +
+[docs]
+    def printResults(self) -> None:
+        """
+        This function is used to print the results
+        :return: None
+        """
+        print("Total number of High Utility Patterns:", len(self.getPatterns()))
+        print("Total Memory in USS:", self.getMemoryUSS())
+        print("Total Memory in RSS", self.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_ab._sys.argv) == 4 or len(_ab._sys.argv) == 5:
+        if len(_ab._sys.argv) == 5:
+            _ap = UPGrowth(_ab._sys.argv[1], int(_ab._sys.argv[3]), _ab._sys.argv[4])
+        if len(_ab._sys.argv) == 4:
+            _ap = UPGrowth(_ab._sys.argv[1], int(_ab._sys.argv[3]))
+        _ap.mine()
+        print("Total number of High Utility Patterns:", len(_ap.getPatterns()))
+        _ap.save(_ab._sys.argv[2])
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        print("Error! The number of input parameters do not match the total number of parameters provided")
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/highUtilitySpatialPattern/abstract.html b/sphinx/_build/html/_modules/PAMI/highUtilitySpatialPattern/abstract.html new file mode 100644 index 000000000..5068a1f87 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/highUtilitySpatialPattern/abstract.html @@ -0,0 +1,361 @@ + + + + + + PAMI.highUtilitySpatialPattern.abstract — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for PAMI.highUtilitySpatialPattern.abstract

+#  Copyright (C)  2021 Rage Uday Kiran
+#
+#      This program is free software: you can redistribute it and/or modify
+#      it under the terms of the GNU General Public License as published by
+#      the Free Software Foundation, either version 3 of the License, or
+#      (at your option) any later version.
+#
+#      This program is distributed in the hope that it will be useful,
+#      but WITHOUT ANY WARRANTY; without even the implied warranty of
+#      MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#      GNU General Public License for more details.
+#
+#      You should have received a copy of the GNU General Public License
+#      along with this program.  If not, see <https://www.gnu.org/licenses/>.
+#
+
+from abc import ABC, abstractmethod
+import time
+import validators
+from urllib.request import urlopen
+import csv
+import pandas as pd
+from collections import defaultdict
+from itertools import combinations as c
+import os
+import os.path
+import psutil
+import sys
+
+
+
+[docs] +class utilityPatterns(ABC): + """ + :Description: This abstract base class defines the variables and methods that every frequent pattern mining algorithm must + employ in PAMI + + :Attributes: + + iFile : str + Input file name or path of the input file + minUtil: integer + The user can specify minUtil either in count + sep : str + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space or \t. + However, the users can override their default separator + startTime:float + To record the start time of the algorithm + endTime:float + To record the completion time of the algorithm + finalPatterns: dict + Storing the complete set of patterns in a dictionary variable + oFile : str + Name of the output file to store complete set of frequent patterns + memoryUSS : float + To store the total amount of USS memory consumed by the program + memoryRSS : float + To store the total amount of RSS memory consumed by the program + + :Methods: + + startMine() + Calling this function will start the actual mining process + getPatterns() + This function will output all interesting patterns discovered by an algorithm + save(oFile) + This function will store the discovered patterns in an output file specified by the user + getPatternsAsDataFrame() + The function outputs the patterns generated by an algorithm as a data frame + getMemoryUSS() + This function outputs the total amount of USS memory consumed by a mining algorithm + getMemoryRSS() + This function outputs the total amount of RSS memory consumed by a mining algorithm + getRuntime() + This function outputs the total runtime of a mining algorithm + + """ + + def __init__(self, iFile, nFile, minUtil, sep="\t"): + """ + + :param iFile: Input file name or path of the input file + :type iFile: str + :param nFile: Input file name or path of the neighbourhood file + :type nFile: str + :param minUtil: The user can specify minUtil in count + :type minUtil: int + :param sep: separator used to distinguish items from each other. The default separator is tab space. However, users can override the default separator + :type sep: str + + """ + + self.iFile = iFile + self.sep = sep + self.nFile = nFile + self.minUtil = minUtil + +
+[docs] + @abstractmethod + def iFile(self): + """Variable to store the input file path/file name""" + + pass
+ + +
+[docs] + @abstractmethod + def nFile(self): + """Variable to store the neighbourhood file path/file name""" + + pass
+ + +
+[docs] + @abstractmethod + def minUtil(self): + """Variable to store the user-specified minimum support value""" + + pass
+ + +
+[docs] + @abstractmethod + def startTime(self): + """Variable to store the start time of the mining process""" + + pass
+ + +
+[docs] + @abstractmethod + def endTime(self): + """Variable to store the end time of the complete program""" + + pass
+ + +
+[docs] + @abstractmethod + def memoryUSS(self): + """Variable to store USS memory consumed by the program""" + + pass
+ + +
+[docs] + @abstractmethod + def memoryRSS(self): + """Variable to store RSS memory consumed by the program""" + + pass
+ + +
+[docs] + @abstractmethod + def finalPatterns(self): + """Variable to store the complete set of patterns in a dictionary""" + + pass
+ + +
+[docs] + @abstractmethod + def oFile(self): + """Variable to store the name of the output file to store the complete set of frequent patterns""" + + pass
+ + +
+[docs] + @abstractmethod + def startMine(self): + """Code for the mining process will start from this function""" + + pass
+ + +
+[docs] + @abstractmethod + def getPatterns(self): + """Complete set of frequent patterns generated will be retrieved from this function""" + + pass
+ + +
+[docs] + @abstractmethod + def save(self, oFile): + """Complete set of frequent patterns will be saved in to an output file from this function + + :param oFile: Name of the output file + :type oFile: csv file + """ + + pass
+ + +
+[docs] + @abstractmethod + def getPatternsAsDataFrame(self): + """Complete set of frequent patterns will be loaded in to data frame from this function""" + + pass
+ + +
+[docs] + @abstractmethod + def getMemoryUSS(self): + """Total amount of USS memory consumed by the program will be retrieved from this function""" + + pass
+ + +
+[docs] + @abstractmethod + def getMemoryRSS(self): + """Total amount of RSS memory consumed by the program will be retrieved from this function""" + + pass
+ + + +
+[docs] + @abstractmethod + def getRuntime(self): + """Total amount of runtime taken by the program will be retrieved from this function""" + + pass
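+
+# A minimal conforming skeleton (illustrative only; the class name MyMiner and
+# its bodies are hypothetical, not part of PAMI):
+#
+#   class MyMiner(utilityPatterns):
+#       def startMine(self): ...       # run the mining process
+#       def getPatterns(self): return self.finalPatterns
+#       def save(self, oFile): ...     # write self.finalPatterns to oFile
+#       ...                            # plus the remaining abstract members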
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/highUtilitySpatialPattern/basic/HDSHUIM.html b/sphinx/_build/html/_modules/PAMI/highUtilitySpatialPattern/basic/HDSHUIM.html new file mode 100644 index 000000000..72ad1209d --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/highUtilitySpatialPattern/basic/HDSHUIM.html @@ -0,0 +1,899 @@ + + + + + + PAMI.highUtilitySpatialPattern.basic.HDSHUIM — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for PAMI.highUtilitySpatialPattern.basic.HDSHUIM

+# Spatial High Utility ItemSet Mining (SHUIM) [3] is an important model in data
+# mining with many real-world applications. It involves finding all spatially interesting itemSets having high value
+# in a quantitative spatio-temporal database.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#
+#             from PAMI.highUtilitySpatialPattern.basic import HDSHUIM as alg
+#
+#             obj=alg.HDSHUIM("input.txt","Neighbours.txt",35)
+#
+#             obj.mine()
+#
+#             Patterns = obj.getPatterns()
+#
+#             print("Total number of Spatial High-Utility Patterns:", len(Patterns))
+#
+#             obj.save("output")
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+from PAMI.highUtilitySpatialPattern.basic import abstract as _ab
+from typing import List, Dict, Tuple, Set, Union, Any, Generator
+from deprecated import deprecated
+
+class _Element:
+    """
+    A class representing an element of a utility list, as used by the HDSHUIM algorithm.
+
+    :Attributes:
+
+        ts : int
+            keeps track of the transaction id
+        snu : int
+            Spatial non-closed itemSet utility
+        remainingUtility : int
+            Spatial non-closed remaining utility
+        pu : int
+            prefix utility
+        prevPos: int
+            position of previous item in the list
+    """
+
+    def __init__(self, ts: int, snu: int, remainingUtility: int, pu: int, prevPos: int) -> None:
+        self.ts = ts
+        self.snu = snu
+        self.remainingUtility = remainingUtility
+        self.pu = pu
+        self.prevPos = prevPos
+
+
+class _CUList:
+    """
+    A class representing a UtilityList, as used by the HDSHUIM algorithm.
+
+    :Attributes:
+
+        item: int
+            item 
+        sumSnu: long
+            the sum of item utilities
+        sumRemainingUtility: long
+            the sum of remaining utilities
+        sumCu : long
+            the sum of closed utilities
+        sumCru: long
+            the sum of closed remaining utilities
+        sumCpu: long
+            the sum of closed prefix utilities
+        elements: list
+            the list of elements 
+
+    :Methods:
+
+        addElement(element)
+            Method to add an element to this utility list and update the sums at the same time.
+
+    """
+
+    def __init__(self, item: str) -> None:
+        self.item = item
+        self.sumSnu = 0
+        self.sumRemainingUtility = 0
+        self.sumCu = 0
+        self.sumCru = 0
+        self.sumCpu = 0
+        self.elements = []
+
+    def addElements(self, element: _Element) -> None:
+        """
+        A method to add a new element to the CUList and update its running sums
+
+        :param element: element to be added to CUList
+        :type element: Element
+        :return: None
+        """
+        self.sumSnu += element.snu
+        self.sumRemainingUtility += element.remainingUtility
+        self.elements.append(element)
+
+
+class _Pair:
+    """
+    A class representing an item and its utility in a transaction
+    """
+
+    def __init__(self) -> None:
+        self.item = 0
+        self.utility = 0
+
+
+
+[docs] +class HDSHUIM(_ab._utilityPatterns): + """ + :Description: + + Spatial High Utility ItemSet Mining (SHUIM) [3] is an important model in data + mining with many real-world applications. It involves finding all spatially interesting itemSets having high value + in a quantitative spatio temporal database. + + :Reference: + + P. Pallikila et al., "Discovering Top-k Spatial High Utility Itemsets in Very Large Quantitative Spatiotemporal + databases," 2021 IEEE International Conference on Big Data (Big Data), Orlando, FL, USA, 2021, pp. 4925-4935, + doi: 10.1109/BigData52589.2021.9671912. + + :param iFile: str : + Name of the Input file to mine complete set of High Utility Spatial patterns + :param oFile: str : + Name of the output file to store complete set of High Utility Spatial patterns + :param minSup: int or float or str : + The user can specify minSup either in count or proportion of database size. If the program detects the data type of minSup is integer, then it treats minSup is expressed in count. Otherwise, it will be treated as float. + :param maxPer: float : + The user can specify maxPer in count or proportion of database size. If the program detects the data type of maxPer is integer, then it treats maxPer is expressed in count. + :param minUtil: int : + Minimum utility threshold given by User + :param nFile: str : + Name of the input file to mine complete set of High Utility Spatial patterns + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + + :Attributes: + + iFile : str + Name of the input file to mine complete set of frequent patterns + oFile : str + Name of the output file to store complete set of frequent patterns + nFile: str + Name of Neighbourhood items file + memoryRSS : float + To store the total amount of RSS memory consumed by the program + startTime:float + To record the start time of the mining process + endTime:float + To record the completion time of the mining process + minUtil : int + The user given minUtil + mapFMAP: list + EUCS map of the FHM algorithm + candidates: int + candidates generated + huiCnt: int + huis created + neighbors: map + keep track of neighbours of elements + mapOfPMU: map + a map to keep track of Probable Maximum utility(PMU) of each item + :Methods: + + mine() + Mining process will start from here + getPatterns() + Complete set of patterns will be retrieved with this function + save(oFile) + Complete set of frequent patterns will be loaded in to a output file + constructCUL(x, compactUList, st, minUtil, length, exNeighbours) + A method to construct CUL's database + getPatternsAsDataFrame() + Complete set of frequent patterns will be loaded in to a dataframe + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function + Explore_SearchTree(prefix, uList, exNeighbours, minUtil) + A method to find all high utility itemSets + updateClosed(x, compactUList, st, exCul, newT, ex, eyTs, length) + A method to update closed values + saveItemSet(prefix, prefixLen, item, utility) + A method to save itemSets + updateElement(z, compactUList, st, exCul, newT, ex, duPrevPos, eyTs) + A method to updates vales for duplicates + + + 
**Executing the code on terminal:** + ------------------------------------- + + .. code-block:: console + + Format: + + (.venv) $ python3 HDSHUIM.py <inputFile> <outputFile> <Neighbours> <minUtil> <separator> + + Example Usage: + + (.venv) $ python3 HDSHUIM.py sampleTDB.txt output.txt sampleN.txt 35 ',' + + .. note:: minSup will be considered in percentage of database transactions + + + **Sample run of importing the code:** + --------------------------------------- + .. code-block:: python + + from PAMI.highUtilityGeoreferencedFrequentPattern.basic import HDSHUIM as alg + + obj=alg.HDSHUIM("input.txt","Neighbours.txt",35) + + obj.mine() + + Patterns = obj.getPatterns() + + print("Total number of Spatial High-Utility Patterns:", len(Patterns)) + + obj.save("output") + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + **Credits:** + --------------- + The complete program was written by B.Sai Chitra under the supervision of Professor Rage Uday Kiran. + + """ + + _startTime = float() + _endTime = float() + _minSup = str() + _maxPer = float() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _nFile = " " + _minUtil = 0 + _memoryUSS = float() + _memoryRSS = float() + _sep = "\t" + + def __init__(self, iFile: str, nFile: str, minUtil: int, sep: str="\t") -> None: + super().__init__(iFile, nFile, minUtil, sep) + self._startTime = 0 + self._endTime = 0 + self._huiCount = 0 + self._candidates = 0 + self._mapOfPMU = {} + self._mapFMAP = {} + self._neighbors = {} + self._finalPatterns = {} + + def _compareItems(self, o1: Any, o2: Any) -> int: + """ + A Function that sort all FFI-list in ascending order of Support + + :param o1: First FFI-list + + :type o1: _FFList + + :param o2: Second FFI-list + + :type o1: _FFList + + :return: Comparision Value + + :rtype: int + """ + compare = self._mapOfPMU[o1.item] - self._mapOfPMU[o2.item] + if compare == 0: + return int(o1.item) - int(o2.item) + else: + return compare + +
+[docs]
+    @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.")
+    def startMine(self) -> None:
+        """
+        main program to start the operation
+        """
+        self.mine()
+ + + + +
+[docs] + def mine(self) -> None: + """ + main program to start the operation + """ + minUtil = self._minUtil + self._startTime = _ab._time.time() + with open(self._nFile, 'r') as file1: + for line in file1: + line = line.split("\n")[0] + parts = line.split(self._sep) + parts = [i.strip() for i in parts] + item = parts[0] + neigh1 = list() + for i in range(1, len(parts)): + neigh1.append(parts[i]) + self._neighbors[item] = set(neigh1) + with open(self._iFile, 'r') as file: + for line in file: + parts = line.split(":") + itemString = (parts[0].split("\n")[0]).split(self._sep) + utilityString = (parts[2].split("\n")[0]).split(self._sep) + transUtility = int(parts[1]) + trans1 = set() + for i in range(0, len(itemString)): + trans1.add(itemString[i]) + for i in range(0, len(itemString)): + item = itemString[i] + twu = self._mapOfPMU.get(item) + if twu is None: + twu = int(utilityString[i]) + else: + twu += int(utilityString[i]) + self._mapOfPMU[item] = twu + if self._neighbors.get(item) is None: + continue + neighbours2 = trans1.intersection(self._neighbors.get(item)) + for item2 in neighbours2: + if self._mapOfPMU.get(item2) is None: + self._mapOfPMU[item2] = int(utilityString[i]) + else: + self._mapOfPMU[item2] += int(utilityString[i]) + + listOfCUList = [] + hashTable = {} + mapItemsToCUList = {} + for item in self._mapOfPMU.keys(): + if self._mapOfPMU.get(item) >= minUtil: + uList = _CUList(item) + mapItemsToCUList[item] = uList + listOfCUList.append(uList) + listOfCUList.sort(key=_ab._functools.cmp_to_key(self._compareItems)) + ts = 1 + with open(self._iFile, 'r') as file: + for line in file: + parts = line.split(":") + items = (parts[0].split("\n")[0]).split(self._sep) + utilities = (parts[2].split("\n")[0]).split(self._sep) + ru = 0 + newTwu = 0 + txKey = [] + revisedTrans = [] + for i in range(0, len(items)): + pair = _Pair() + pair.item = items[i] + pair.utility = int(utilities[i]) + if self._mapOfPMU.get(pair.item) >= minUtil: + revisedTrans.append(pair) + txKey.append(pair.item) + newTwu += pair.utility + revisedTrans.sort(key=_ab._functools.cmp_to_key(self._compareItems)) + txKey1 = tuple(txKey) + if len(revisedTrans) > 0: + if txKey1 not in hashTable.keys(): + hashTable[txKey1] = len(mapItemsToCUList[revisedTrans[len(revisedTrans) - 1].item].elements) + for i in range(len(revisedTrans) - 1, -1, -1): + pair = revisedTrans[i] + cuListOfItems = mapItemsToCUList.get(pair.item) + element = _Element(ts, pair.utility, ru, 0, 0) + if i > 0: + element.prevPos = len(mapItemsToCUList[revisedTrans[i - 1].item].elements) + else: + element.prevPos = -1 + cuListOfItems.addElements(element) + ru += pair.utility + else: + pos = hashTable[txKey1] + ru = 0 + for i in range(len(revisedTrans) - 1, -1, -1): + cuListOfItems = mapItemsToCUList[revisedTrans[i].item] + cuListOfItems.elements[pos].snu += revisedTrans[i].utility + cuListOfItems.elements[pos].remainingUtility += ru + cuListOfItems.sumSnu += revisedTrans[i].utility + cuListOfItems.sumRemainingUtility += ru + ru += revisedTrans[i].utility + pos = cuListOfItems.elements[pos].prevPos + # EUCS + for i in range(len(revisedTrans) - 1, -1, -1): + pair = revisedTrans[i] + mapFMAPItem = self._mapFMAP.get(pair.item) + if mapFMAPItem is None: + mapFMAPItem = {} + self._mapFMAP[pair.item] = mapFMAPItem + for j in range(i + 1, len(revisedTrans)): + pairAfter = revisedTrans[j] + twuSUm = mapFMAPItem.get(pairAfter.item) + if twuSUm is None: + mapFMAPItem[pairAfter.item] = newTwu + else: + mapFMAPItem[pairAfter.item] = twuSUm + newTwu + ts += 1 + exNeighbours 
= set(self._mapOfPMU.keys()) + # print(self.Neighbours) + self._ExploreSearchTree([], listOfCUList, exNeighbours, minUtil) + self._endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = float() + self._memoryRSS = float() + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss
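+    # Illustrative note (a sketch of the expected inputs, not part of the
+    # original source): the neighbourhood file lists an item followed by its
+    # neighbours on each line (e.g. "a b c" with a space separator), while
+    # each database line holds three ':'-separated fields -- items,
+    # transaction utility and item utilities (e.g. "a b c:10:5 3 2").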
+ + + def _ExploreSearchTree(self, prefix: List[str], uList: List[_CUList], exNeighbours: set, minUtil: int) -> None: + """ + A method to find all high utility itemSets + :parm prefix: it represents all items in prefix + :type prefix :list + :parm uList:projected Utility list. + :type uList: list + :parm exNeighbours: keep track of common Neighbours + :type exNeighbours: set + :parm minUtil:user minUtil + :type minUtil:int + :return: None + """ + for i in range(0, len(uList)): + x = uList[i] + if x.item not in exNeighbours: + continue + self._candidates += 1 + sortedPrefix = [0] * (len(prefix) + 1) + sortedPrefix = prefix[0:len(prefix) + 1] + sortedPrefix.append(x.item) + if (x.sumSnu + x.sumCu >= minUtil) and (x.item in exNeighbours): + self._saveItemSet(prefix, len(prefix), x.item, x.sumSnu + x.sumCu) + if x.sumSnu + x.sumCu + x.sumRemainingUtility + x.sumCru >= minUtil: # U-Prune # and (x.item in exNeighbours)): + ULIST = [] + for j in range(i, len(uList)): + if (uList[j].item in exNeighbours) and (self._neighbors.get(x.item) is not None) and ( + uList[j].item in self._neighbors.get(x.item)): + ULIST.append(uList[j]) + exULs = self._constructCUL(x, ULIST, -1, minUtil, len(sortedPrefix), exNeighbours) + if self._neighbors.get(x.item) is not None and exNeighbours is not None: + set1 = exNeighbours.intersection(self._neighbors.get(x.item)) + if exULs is None or set1 is None: + continue + self._ExploreSearchTree(sortedPrefix, exULs, set1, minUtil) + + def _constructCUL(self, x: _Element, compactUList: List[_CUList], st: int, minUtil: int, length: int, exNeighbours: set) -> List[_CUList]: + """ + A method to construct CUL's database + :parm x: Compact utility list + :type x: Node + :parm compactUList:list of Compact utility lists. + :type compactUList:list + :parm st: starting pos of compactUList + :type st:int + :parm minUtil: user minUtil + :type minUtil:int + :parm length: length of x + :type length:int + :parm exNeighbours: common Neighbours + :type exNeighbours: set + :return: projected database of list X + :rtype: list or set + """ + exCul = [] + lau = [] + cUtil = [] + eyTs = [] + for i in range(0, len(compactUList)): + uList = _CUList(compactUList[i].item) + exCul.append(uList) + lau.append(0) + cUtil.append(0) + eyTs.append(0) + sz = len(compactUList) - (st + 1) + exSZ = sz + for j in range(st + 1, len(compactUList)): + mapOfTWUF = self._mapFMAP[x.item] + if mapOfTWUF is not None: + twuf = mapOfTWUF.get(compactUList[j].item) + if twuf != None and twuf < minUtil or (not (exCul[j].item in exNeighbours)): + exCul[j] = None + exSZ = sz - 1 + else: + uList = _CUList(compactUList[j].item) + exCul[j] = uList + eyTs[j] = 0 + lau[j] = x.sumCu + x.sumCru + x.sumSnu + x.sumRemainingUtility + cUtil[j] = x.sumCu + x.sumCru + hashTable = {} + for ex in x.elements: + newT = [] + for j in range(st + 1, len(compactUList)): + if exCul[j] is None: + continue + eyList = compactUList[j].elements + while eyTs[j] < len(eyList) and eyList[eyTs[j]].ts < ex.ts: + eyTs[j] = eyTs[j] + 1 + if eyTs[j] < len(eyList) and eyList[eyTs[j]].ts == ex.ts: + newT.append(j) + else: + lau[j] = lau[j] - ex.snu - ex.remainingUtility + if lau[j] < minUtil: + exCul[j] = None + exSZ = exSZ - 1 + if len(newT) == exSZ: + self._updateClosed(x, compactUList, st, exCul, newT, ex, eyTs, length) + else: + if len(newT) == 0: + continue + ru = 0 + newT1 = tuple(newT) + if newT1 not in hashTable.keys(): + hashTable[newT1] = len(exCul[newT[len(newT) - 1]].elements) + for i in range(len(newT) - 1, -1, -1): + cuListOfItems = exCul[newT[i]] 
+ y = compactUList[newT[i]].elements[eyTs[newT[i]]] + element = _Element(ex.ts, ex.snu + y.snu - ex.pu, ru, ex.snu, 0) + if i > 0: + element.prevPos = len(exCul[newT[i - 1]].elements) + else: + element.prevPos = -1 + cuListOfItems.addElements(element) + ru += y.snu - ex.pu + else: + dPrevPos = hashTable[newT1] + self._updateElement(x, compactUList, st, exCul, newT, ex, dPrevPos, eyTs) + for j in range(st + 1, len(compactUList)): + cUtil[j] = cUtil[j] + ex.snu + ex.remainingUtility + filter_compactUList = [] + for j in range(st + 1, len(compactUList)): + if cUtil[j] < minUtil or exCul[j] is None: + continue + else: + if length > 1: + exCul[j].sumCu += compactUList[j].sumCu + x.sumCu - x.sumCpu + exCul[j].sumCru += compactUList[j].sumCru + exCul[j].sumCpu += x.sumCu + filter_compactUList.append(exCul[j]) + return filter_compactUList + + def _updateClosed(self, x: _Element, compactUList: List[_CUList], st: int, exCul: List[_CUList], newT: List[int], ex: _Element, eyTs: List[int], length: int) -> None: + """ + A method to update closed values + :parm x: Compact utility list. + :type x: list + :parm compactUList:list of Compact utility lists. + :type compactUList:list + :parm st: starting pos of compactUList + :type st:int + :parm newT:transaction to be updated + :type newT:list + :parm ex: element ex + :type ex:element + :parm eyTs:list of tss + :type eyTs:ts + :parm length: length of x + :type length:int + :return: None + """ + remainingUtility = 0 + for j in range(len(newT) - 1, -1, -1): + ey = compactUList[newT[j]] + eyy = ey.elements[eyTs[newT[j]]] + exCul[newT[j]].sumCu += ex.snu + eyy.snu - ex.pu + exCul[newT[j]].sumCru += remainingUtility + exCul[newT[j]].sumCpu += ex.snu + remainingUtility = remainingUtility + eyy.snu - ex.pu + + def _updateElement(self, z: _Element, compactUList: List[_CUList], st: int, exCul: List[_CUList], newT: List[int], ex: _Element, duPrevPos: int, eyTs: List[int]) -> None: + """ + A method to updates vales for duplicates + :parm z: Compact utility list + :type z: list + :parm compactUList:list of Compact utility lists + :type compactUList:list + :parm st: starting pos of compactUList + :type st:int + :parm exCul:list of compactUList + :type exCul:list + :parm newT:transaction to be updated + :type newT:list + :parm ex: element ex + :type ex:element + :parm duPrevPos: position of z in exCul + :type duPrevPos:int + :parm eyTs:list of tss + :type eyTs:ts + :return: None + """ + remainingUtility = 0 + pos = duPrevPos + for j in range(len(newT) - 1, -1, -1): + ey = compactUList[newT[j]] + eyy = ey.elements[eyTs[newT[j]]] + exCul[newT[j]].elements[pos].snu += ex.snu + eyy.snu - ex.pu + exCul[newT[j]].sumSnu += ex.snu + eyy.snu - ex.pu + exCul[newT[j]].elements[pos].remainingUtility += remainingUtility + exCul[newT[j]].sumRemainingUtility += remainingUtility + exCul[newT[j]].elements[pos].pu += ex.snu + remainingUtility = remainingUtility + eyy.snu - ex.pu + pos = exCul[newT[j]].elements[pos].prevPos + + def _saveItemSet(self, prefix: List[str], prefixLen: int, item: str, utility: int) -> None: + """ + A method to save itemSets + :parm prefix: it represents all items in prefix + :type prefix :list + :parm item:item + :type item: int + :parm utility:utility of itemSet + :type utility:int + :return: None + """ + self._huiCount += 1 + res = str() + for i in range(0, prefixLen): + res += str(prefix[i]) + "\t" + res += str(item) + res1 = str(utility) + self._finalPatterns[res] = res1 + +
+[docs]
+    def getPatternsAsDataFrame(self) -> _ab._pd.DataFrame:
+        """
+        Storing final frequent patterns in a dataframe
+
+        :return: returning frequent patterns in a dataframe
+        :rtype: pd.DataFrame
+        """
+        data = []
+        for a, b in self._finalPatterns.items():
+            data.append([a.replace('\t', ' '), b])
+        dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support'])
+        return dataFrame
+ + +
+[docs] + def getPatterns(self) -> Dict[str, str]: + """ + Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs]
+    def save(self, outFile: str) -> None:
+        """
+        Complete set of frequent patterns will be written to an output file
+
+        :param outFile: name of the output file
+        :type outFile: csv file
+        :return: None
+        """
+        self.oFile = outFile
+        with open(self.oFile, 'w+') as writer:
+            for x, y in self._finalPatterns.items():
+                patternsAndSupport = x.strip() + ":" + str(y)
+                writer.write("%s \n" % patternsAndSupport)
+ + +
+[docs] + def getMemoryUSS(self) -> float: + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self) -> float: + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self) -> float: + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + return self._endTime - self._startTime
+ + +
+[docs] + def printResults(self) -> None: + """ + This function is used to print the results + """ + print("Total number of Spatial High Utility Patterns:", len(self.getPatterns())) + print("Total Memory in USS:", self.getMemoryUSS()) + print("Total Memory in RSS", self.getMemoryRSS()) + print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_ab._sys.argv) == 5 or len(_ab._sys.argv) == 6:
+        if len(_ab._sys.argv) == 6:  # to include a user specified separator
+            _ap = HDSHUIM(_ab._sys.argv[1], _ab._sys.argv[3], int(_ab._sys.argv[4]), _ab._sys.argv[5])
+        if len(_ab._sys.argv) == 5:  # to consider "\t" as a separator
+            _ap = HDSHUIM(_ab._sys.argv[1], _ab._sys.argv[3], int(_ab._sys.argv[4]))
+        _ap.mine()
+        print("Total number of Spatial High-Utility Patterns:", len(_ap.getPatterns()))
+        _ap.save(_ab._sys.argv[2])
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        print("Error! The number of input parameters do not match the total number of parameters provided")
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/highUtilitySpatialPattern/basic/SHUIM.html b/sphinx/_build/html/_modules/PAMI/highUtilitySpatialPattern/basic/SHUIM.html new file mode 100644 index 000000000..9743aee3e --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/highUtilitySpatialPattern/basic/SHUIM.html @@ -0,0 +1,1067 @@ + + + + + + PAMI.highUtilitySpatialPattern.basic.SHUIM — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for PAMI.highUtilitySpatialPattern.basic.SHUIM

+# Spatial High Utility itemSet Mining (SHUIM) aims to discover all itemSets in a spatioTemporal database
+# that satisfy the user-specified minimum utility and maximum distance constraints
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#             from PAMI.highUtilitySpatialPattern.basic import SHUIM as alg
+#
+#             obj=alg.SHUIM("input.txt","Neighbours.txt",35)
+#
+#             obj.mine()
+#
+#             frequentPatterns = obj.getPatterns()
+#
+#             print("Total number of Spatial high utility Patterns:", len(frequentPatterns))
+#
+#             obj.save("output")
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+from PAMI.highUtilitySpatialPattern.basic import abstract as _ab
+from typing import List, Dict, Tuple, Set, Union, Any, Generator, Optional, TypeVar
+from functools import cmp_to_key as _cmpToKey
+import pandas as pd
+from deprecated import deprecated
+
+
+class _Transaction:
+    """
+    A class to store a transaction of a database
+
+    :Attributes:
+
+        items: list
+            A list of items in transaction 
+        utilities: list
+            A list of utilities of items in transaction
+        transactionUtility: int
+            represents the total sum of all item utilities in the transaction
+        pmus: list
+            represent the pmu (probable maximum utility) of each element in the transaction
+        prefixUtility:
+            the prefix utility accumulated by projected transactions
+        offset:
+            an offset pointer, used by projected transactions
+    :Methods:
+
+        projectTransaction(offsetE):
+            A method to create a new, projected transaction from the existing one up to offsetE
+        getItems():
+            return items in transaction
+        getUtilities():
+            return utilities in transaction
+        getPmus():
+            return pmus in transaction
+        getLastPosition():
+            return last position in a transaction
+        removeUnpromisingItems():
+            A method to remove items with lower utility than minUtil
+        insertionSort():
+            A method to sort all items in the transaction
+    """
+    offset = 0
+    prefixUtility = 0
+    Self = TypeVar("Self", bound="_Transaction")
+    
+    def __init__(self, items: List[int], utilities: List[int], transactionUtility: int, pmus: Optional[List[int]]=None) -> None:
+        self.items = items
+        self.utilities = utilities
+        self.transactionUtility = transactionUtility
+        if pmus is not None:
+            self.pmus = pmus
+
+    def projectTransaction(self, offsetE: int) -> Self:
+        """
+        A method to create a new, projected transaction from the existing one up to offsetE
+
+        :param offsetE: an offset over the original transaction for projecting the transaction
+        :type offsetE: int
+        :return: the projected transaction
+        :rtype: _Transaction
+        """
+        new_transaction = _Transaction(self.items, self.utilities, self.transactionUtility)
+        utilityE = self.utilities[offsetE]
+        new_transaction.prefixUtility = self.prefixUtility + utilityE
+        new_transaction.transactionUtility = self.transactionUtility - utilityE
+        for i in range(self.offset, offsetE):
+            new_transaction.transactionUtility -= self.utilities[i]
+        new_transaction.offset = offsetE + 1
+        return new_transaction
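+
+    # A minimal worked example (illustrative values, not from the original
+    # source): projecting a transaction with items [1, 2, 3], utilities
+    # [5, 3, 2] and transactionUtility 10 on item 2 (offsetE = 1) yields a
+    # transaction with offset = 2, prefixUtility = 3 and
+    # transactionUtility = 10 - 3 - 5 = 2, so only item 3 remains to be
+    # explored.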
+
+    def getItems(self) -> List[int]:
+        """
+        A method to return items in transaction
+        :return: items in transaction
+        :rtype: list
+        """
+        return self.items
+
+    def getPmus(self) -> List[int]:
+        """
+        A method to return pmus in transaction
+        :return: pmus in transaction
+        :rtype: list
+        """
+        return self.pmus
+
+    def getUtilities(self) -> List[int]:
+        """
+        A method to return utilities in transaction
+        :return: utilities in transaction
+        :rtype: list
+        """
+        return self.utilities
+
+    # get the last position in this transaction
+    def getLastPosition(self) -> int:
+        """
+        A method to return last position in a transaction
+        :return: last position in a transaction
+        :rtype: int
+        """
+        return len(self.items) - 1
+
+    def removeUnpromisingItems(self, oldNamesToNewNames: Dict[int, int]) -> None:
+        """
+        A method to remove items with lower utility than minUtil and rename the remaining items
+
+        :param oldNamesToNewNames: A map from old item names to new item names
+
+        :type oldNamesToNewNames: map
+
+        :return: None
+        """
+        tempItems = []
+        tempUtilities = []
+        for idx, item in enumerate(self.items):
+            if item in oldNamesToNewNames:
+                tempItems.append(oldNamesToNewNames[item])
+                tempUtilities.append(self.utilities[idx])
+            else:
+                self.transactionUtility -= self.utilities[idx]
+        self.items = tempItems
+        self.utilities = tempUtilities
+        self.insertionSort()
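+
+    # A minimal worked example (illustrative values, not from the original
+    # source): with items [4, 7, 9], utilities [2, 6, 1] and
+    # oldNamesToNewNames = {7: 1, 4: 2}, item 9 is dropped (its utility 1 is
+    # subtracted from transactionUtility) and, after renaming and insertion
+    # sort, the transaction holds items [1, 2] with utilities [6, 2].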
+
+    def insertionSort(self) -> None:
+        """
+        A method to sort the transaction's items in ascending order (insertion sort), keeping utilities aligned
+        :return: None
+        """
+        for i in range(1, len(self.items)):
+            key = self.items[i]
+            utilityJ = self.utilities[i]
+            j = i - 1
+            while j >= 0 and key < self.items[j]:
+                self.items[j + 1] = self.items[j]
+                self.utilities[j + 1] = self.utilities[j]
+                j -= 1
+            self.items[j + 1] = key
+            self.utilities[j + 1] = utilityJ
+
+
+class _Dataset:
+    """
+    A class representing the list of transactions in this dataset
+
+    :Attributes:
+
+        transactions :
+            the list of transactions in this dataset
+        maxItem:
+            the largest item name
+        
+    :methods:
+
+        createTransaction(line):
+            Create a transaction object from a line from the input file
+        getMaxItem():
+            return Maximum Item
+        getTransactions():
+            return transactions in database
+
+    """
+    transactions = []
+    maxItem = 0
+    
+    def __init__(self, datasetpath: str, sep: str) -> None:
+        self.strToInt = {}
+        self.intToStr = {}
+        self.cnt = 1
+        self.sep = sep
+        self.transactions = []
+        with open(datasetpath, 'r') as f:
+            lines = f.readlines()
+            for line in lines:
+                self.transactions.append(self.createTransaction(line))
+
+    def createTransaction(self, line: str) -> _Transaction:
+        """
+        A method to create a Transaction object from a single line of the database
+
+        :param line: a single line of the database, in the format items:transactionUtility:utilities:pmus
+        :type line: string
+        :return: the parsed transaction
+        :rtype: _Transaction
+        """
+        trans_list = line.strip().split(':')
+        transactionUtility = int(trans_list[1])
+        itemsString = trans_list[0].strip().split(self.sep)
+        utilityString = trans_list[2].strip().split(self.sep)
+        pmuString = trans_list[3].strip().split(self.sep)
+        items = []
+        utilities = []
+        pmus = []
+        for idx, item in enumerate(itemsString):
+            if item not in self.strToInt:
+                self.strToInt[item] = self.cnt
+                self.intToStr[self.cnt] = item
+                self.cnt += 1
+            item_int = self.strToInt.get(item)
+            if item_int > self.maxItem:
+                self.maxItem = item_int
+            items.append(item_int)
+            utilities.append(int(utilityString[idx]))
+            pmus.append(int(pmuString[idx]))
+        return _Transaction(items, utilities, transactionUtility, pmus)
+
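+    # An illustrative input line (an assumption inferred from the parsing above,
+    # not a file shipped with the library). Four colon-separated fields hold the
+    # items, the transaction utility, the per-item utilities and the per-item
+    # PMU values; items within a field are separated by `sep` (here a tab):
+    #
+    #   "a\tb\tc:15:5\t4\t6:7\t5\t6"
+    #
+    # parses to items [a, b, c] (renamed to [1, 2, 3]), transactionUtility 15,
+    # utilities [5, 4, 6] and pmus [7, 5, 6].
+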
+    def getMaxItem(self) -> int:
+        """
+        A method to return the name of the largest item
+
+        :return: the largest item name in the dataset
+
+        :rtype: int
+        """
+        return self.maxItem
+
+    def getTransactions(self) -> List[_Transaction]:
+        """
+        A method to return transactions from database
+
+        :return: the list of transactions stored in the database
+
+        :rtype: list
+        """
+        return self.transactions
+
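+# A self-contained usage sketch of _Dataset (hypothetical file contents, not part
+# of the original module): write two transactions in the
+# items:transactionUtility:utilities:pmus format and load them back.
+#
+#   import os, tempfile
+#   fd, path = tempfile.mkstemp(text=True)
+#   with os.fdopen(fd, 'w') as f:
+#       f.write("a\tb:9:5\t4:6\t5\n")
+#       f.write("b\tc:10:4\t6:5\t7\n")
+#   db = _Dataset(path, '\t')
+#   assert len(db.getTransactions()) == 2 and db.getMaxItem() == 3
+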
+
+
+class SHUIM(_ab._utilityPatterns):
+    """
+    :Description:
+
+        Spatial High Utility ItemSet Mining (SHUIM) aims to discover all itemSets in a spatioTemporal database
+        that satisfy the user-specified minimum utility and maximum distance constraints.
+
+    :Reference:
+
+        Rage, Uday & Veena, Pamalla & Penugonda, Ravikumar & Raj, Bathala & Dao, Minh & Zettsu, Koji & Bommisetti, Sai.
+        (2023). HDSHUI-miner: a novel algorithm for discovering spatial high-utility itemsets in high-dimensional
+        spatiotemporal databases. Applied Intelligence. 53. 1-26. 10.1007/s10489-022-04436-w.
+
+    :param iFile: str :
+        Name of the input file to mine the complete set of high utility spatial patterns
+    :param oFile: str :
+        Name of the output file to store the complete set of high utility spatial patterns
+    :param minSup: int or float or str :
+        The user can specify minSup either as a count or as a proportion of the database size. If the program detects that minSup is an integer, it is treated as a count; otherwise, it is treated as a proportion.
+    :param maxPer: float :
+        The user can specify maxPer either as a count or as a proportion of the database size. If the program detects that maxPer is an integer, it is treated as a count.
+    :param minUtil: int :
+        Minimum utility threshold given by the user
+    :param maxMemory: int :
+        Maximum memory used by this program while running
+    :param candidateCount: int :
+        Number of candidates considered while calculating a high utility spatial pattern
+    :param nFile: str :
+        Name of the neighbourhood file that contains the neighbours of items
+    :param sep: str :
+        This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, users can override the default separator.
+
+    :Attributes:
+
+        iFile : file
+            Name of the input file to mine the complete set of frequent patterns
+        nFile : file
+            Name of the Neighbours file that contains the neighbours of items
+        oFile : file
+            Name of the output file to store the complete set of frequent patterns
+        memoryRSS : float
+            To store the total amount of RSS memory consumed by the program
+        startTime : float
+            To record the start time of the mining process
+        endTime : float
+            To record the completion time of the mining process
+        minUtil : int
+            The user-given minUtil
+        highUtilityItemSets : map
+            Set of high utility itemSets
+        candidateCount : int
+            Number of candidates
+        utilityBinArrayLU : list
+            A map to hold the PMU values of the items in the database
+        utilityBinArraySU : list
+            A map to hold the subtree utility values of the items in the database
+        oldNamesToNewNames : list
+            A map to store the new name corresponding to each old name
+        newNamesToOldNames : list
+            A map to store the old name corresponding to each new name
+        Neighbours : map
+            A dictionary to store the neighbours of an item
+        maxMemory : float
+            Maximum memory used by this program while running
+        patternCount : int
+            Number of SHUIs
+        itemsToKeep : list
+            Keeps only the promising items, i.e., items having TWU >= minUtil
+        itemsToExplore : list
+            Keeps the items whose subtree utility is greater than minUtil
+
+    :Methods:
+
+        mine()
+            Mining process will start from here
+        getPatterns()
+            Complete set of patterns will be retrieved with this function
+        save(oFile)
+            Complete set of frequent patterns will be loaded into an output file
+        getPatternsAsDataFrame()
+            Complete set of frequent patterns will be loaded into a dataframe
+        getMemoryUSS()
+            Total amount of USS memory consumed by the mining process will be retrieved from this function
+        getMemoryRSS()
+            Total amount of RSS memory consumed by the mining process will be retrieved from this function
+        getRuntime()
+            Total amount of runtime taken by the mining process will be retrieved from this function
+        calculateNeighbourIntersection(self, prefixLength)
+            A method to return the common neighbours of items
+        backtrackingEFIM(transactionsOfP, itemsToKeep, itemsToExplore, prefixLength)
+            A method to mine the SHUIs recursively
+        useUtilityBinArraysToCalculateUpperBounds(transactionsPe, j, itemsToKeep, neighbourhoodList)
+            A method to calculate the sub-tree utility and local utility of all items that can extend itemSet P and e
+        output(tempPosition, utility)
+            A method to save a high-utility itemSet to file or memory, depending on what the user chose
+        _isEqual(transaction1, transaction2)
+            A method to check if two transactions are identical
+        intersection(lst1, lst2)
+            A method that returns the intersection of two lists
+        useUtilityBinArrayToCalculateSubtreeUtilityFirstTime(dataset)
+            Scan the initial database to calculate the subtree utility of each item using a utility-bin array
+        sortDatabase(self, transactions)
+            A method to sort transactions in the order of PMU
+        sort_transaction(self, trans1, trans2)
+            A method to sort transactions in the order of PMU
+        useUtilityBinArrayToCalculateLocalUtilityFirstTime(self, dataset)
+            A method to scan the database using a utility-bin array to calculate the PMUs
+
+    **Executing the code on terminal:**
+    ---------------------------------------
+
+    .. code-block:: console
+
+      Format:
+
+      (.venv) $ python3 SHUIM.py <inputFile> <outputFile> <Neighbours> <minUtil> <sep>
+
+      Example Usage:
+
+      (.venv) $ python3 SHUIM.py sampleTDB.txt output.txt sampleN.txt 35
+    .. note:: minUtil will be considered as an absolute utility value
+
+
+    **Sample run of importing the code:**
+    --------------------------------------
+    .. code-block:: python
+
+        from PAMI.highUtilitySpatialPattern.basic import SHUIM as alg
+
+        obj = alg.SHUIM("input.txt", "Neighbours.txt", 35)
+
+        obj.mine()
+
+        frequentPatterns = obj.getPatterns()
+
+        print("Total number of Spatial high utility Patterns:", len(frequentPatterns))
+
+        obj.save("output")
+
+        memUSS = obj.getMemoryUSS()
+
+        print("Total Memory in USS:", memUSS)
+
+        memRSS = obj.getMemoryRSS()
+
+        print("Total Memory in RSS", memRSS)
+
+        run = obj.getRuntime()
+
+        print("Total ExecutionTime in seconds:", run)
+
+    **Credits:**
+    --------------
+    The complete program was written by Pradeep Pallikila under the supervision of Professor Rage Uday Kiran.
+    """
+    _highUtilityItemSets = []
+    _candidateCount = 0
+    _utilityBinArrayLU = {}
+    _utilityBinArraySU = {}
+    _oldNamesToNewNames = {}
+    _newNamesToOldNames = {}
+    _strToInt = {}
+    _intToStr = {}
+    _Neighbours = {}
+    _temp = [0] * 5000
+    _maxMemory = 0
+    _startTime = float()
+    _endTime = float()
+    _minSup = str()
+    _maxPer = float()
+    _finalPatterns = {}
+    _iFile = " "
+    _oFile = " "
+    _nFile = " "
+    _sep = "\t"
+    _minUtil = 0
+    _memoryUSS = float()
+    _memoryRSS = float()
+
+    def __init__(self, iFile: str, nFile: str, minUtil: int, sep: str="\t") -> None:
+        super().__init__(iFile, nFile, minUtil, sep)
+
+    @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.")
+    def startMine(self) -> None:
+        """
+        Main program to start the operation
+        """
+        self.mine()
+
+    def mine(self) -> None:
+        """
+        Main program to start the operation
+        """
+        self._startTime = _ab._time.time()
+        self._patternCount = 0
+        self._finalPatterns = {}
+        self._dataset = _Dataset(self._iFile, self._sep)
+        with open(self._nFile, 'r') as o:
+            lines = o.readlines()
+            for line in lines:
+                line = line.split("\n")[0]
+                line_split = line.split(self._sep)
+                line_split = [i.strip() for i in line_split]
+                item = self._dataset.strToInt.get(line_split[0])
+                lst = []
+                for i in range(1, len(line_split)):
+                    lst.append(self._dataset.strToInt.get(line_split[i]))
+                self._Neighbours[item] = lst
+        InitialMemory = _ab._psutil.virtual_memory()[3]
+        self._useUtilityBinArrayToCalculateLocalUtilityFirstTime(self._dataset)
+        itemsToKeep = []
+        for key in self._utilityBinArrayLU.keys():
+            if self._utilityBinArrayLU[key] >= self._minUtil:
+                itemsToKeep.append(key)
+        itemsToKeep = sorted(itemsToKeep, key=lambda x: self._utilityBinArrayLU[x])
+        currentName = 1
+        for idx, item in enumerate(itemsToKeep):
+            self._oldNamesToNewNames[item] = currentName
+            self._newNamesToOldNames[currentName] = item
+            itemsToKeep[idx] = currentName
+            currentName += 1
+        for transaction in self._dataset.getTransactions():
+            transaction.removeUnpromisingItems(self._oldNamesToNewNames)
+        self._sortDatabase(self._dataset.getTransactions())
+        emptyTransactionCount = 0
+        for transaction in self._dataset.getTransactions():
+            if len(transaction.getItems()) == 0:
+                emptyTransactionCount += 1
+        self._dataset.transactions = self._dataset.transactions[emptyTransactionCount:]
+        self._useUtilityBinArrayToCalculateSubtreeUtilityFirstTime(self._dataset)
+        itemsToExplore = []
+        for item in itemsToKeep:
+            if self._utilityBinArraySU[item] >= self._minUtil:
+                itemsToExplore.append(item)
+        # note: commonitems is built here but not used afterwards
+        commonitems = []
+        for i in range(self._dataset.maxItem):
+            commonitems.append(i)
+        self._backtrackingEFIM(self._dataset.getTransactions(), itemsToKeep, itemsToExplore, 0)
+        finalMemory = _ab._psutil.virtual_memory()[3]
+        memory = (finalMemory - InitialMemory) / 10000
+        if memory > self._maxMemory:
+            self._maxMemory = memory
+        self._endTime = _ab._time.time()
+        process = _ab._psutil.Process(_ab._os.getpid())
+        self._memoryUSS = process.memory_full_info().uss
+        self._memoryRSS = process.memory_info().rss
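+    # A minimal sketch (toy numbers, hypothetical names) of the pruning and
+    # renaming step performed in mine() above: items whose local-utility upper
+    # bound is below minUtil can never occur in a high-utility itemset, so they
+    # are dropped, and the survivors are renamed 1..n in increasing order of
+    # local utility:
+    #
+    #   utilityBinArrayLU = {7: 40, 3: 120, 9: 75}    # item -> local utility
+    #   minUtil = 50
+    #   kept = sorted((k for k, v in utilityBinArrayLU.items() if v >= minUtil),
+    #                 key=lambda k: utilityBinArrayLU[k])
+    #   oldToNew = {old: new for new, old in enumerate(kept, start=1)}
+    #   # kept == [9, 3]; oldToNew == {9: 1, 3: 2}
+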
+    def _backtrackingEFIM(self, transactionsOfP: List[_Transaction], itemsToKeep: List[int], itemsToExplore: List[int], prefixLength: int) -> None:
+        """
+        A method to mine the SHUIs recursively
+
+        :param transactionsOfP: the list of transactions containing the current prefix P
+        :type transactionsOfP: list
+        :param itemsToKeep: the list of secondary items in the p-projected database
+        :type itemsToKeep: list
+        :param itemsToExplore: the list of primary items in the p-projected database
+        :type itemsToExplore: list
+        :param prefixLength: current prefix length
+        :type prefixLength: int
+        :return: None
+        """
+        self._candidateCount += len(itemsToExplore)
+        for idx, e in enumerate(itemsToExplore):
+            initialMemory = _ab._psutil.virtual_memory()[3]
+            transactionsPe = []
+            utilityPe = 0
+            previousTransaction = transactionsOfP[0]
+            consecutiveMergeCount = 0
+            for transaction in transactionsOfP:
+                items = transaction.getItems()
+                if e in items:
+                    positionE = items.index(e)
+                    if transaction.getLastPosition() == positionE:
+                        utilityPe += transaction.getUtilities()[positionE] + transaction.prefixUtility
+                    else:
+                        projectedTransaction = transaction.projectTransaction(positionE)
+                        utilityPe += projectedTransaction.prefixUtility
+                        if previousTransaction == transactionsOfP[0]:
+                            previousTransaction = projectedTransaction
+                        elif self._isEqual(projectedTransaction, previousTransaction):
+                            # identical suffixes: merge the projected transaction into
+                            # previousTransaction by summing the aligned utilities
+                            if consecutiveMergeCount == 0:
+                                items = previousTransaction.items[previousTransaction.offset:]
+                                utilities = previousTransaction.utilities[previousTransaction.offset:]
+                                itemsCount = len(items)
+                                positionPrevious = 0
+                                positionProjection = projectedTransaction.offset
+                                while positionPrevious < itemsCount:
+                                    utilities[positionPrevious] += projectedTransaction.utilities[positionProjection]
+                                    positionPrevious += 1
+                                    positionProjection += 1
+                                previousTransaction.prefixUtility += projectedTransaction.prefixUtility
+                                sumUtilities = previousTransaction.prefixUtility
+                                previousTransaction = _Transaction(items, utilities, previousTransaction.transactionUtility + projectedTransaction.transactionUtility)
+                                previousTransaction.prefixUtility = sumUtilities
+                            else:
+                                positionPrevious = 0
+                                positionProjected = projectedTransaction.offset
+                                itemsCount = len(previousTransaction.items)
+                                while positionPrevious < itemsCount:
+                                    previousTransaction.utilities[positionPrevious] += projectedTransaction.utilities[positionProjected]
+                                    positionPrevious += 1
+                                    positionProjected += 1
+                                previousTransaction.transactionUtility += projectedTransaction.transactionUtility
+                                previousTransaction.prefixUtility += projectedTransaction.prefixUtility
+                            consecutiveMergeCount += 1
+                        else:
+                            transactionsPe.append(previousTransaction)
+                            previousTransaction = projectedTransaction
+                            consecutiveMergeCount = 0
+                    transaction.offset = positionE
+            if previousTransaction != transactionsOfP[0]:
+                transactionsPe.append(previousTransaction)
+            self._temp[prefixLength] = self._newNamesToOldNames[e]
+            if utilityPe >= self._minUtil:
+                self._output(prefixLength, utilityPe)
+            neighbourhoodList = self._calculateNeighbourIntersection(prefixLength)
+            self._useUtilityBinArraysToCalculateUpperBounds(transactionsPe, idx, itemsToKeep, neighbourhoodList)
+            newItemsToKeep = []
+            newItemsToExplore = []
+            for l in range(idx + 1, len(itemsToKeep)):
+                itemK = itemsToKeep[l]
+                if self._utilityBinArraySU[itemK] >= self._minUtil:
+                    if itemK in neighbourhoodList:
+                        newItemsToExplore.append(itemK)
+                        newItemsToKeep.append(itemK)
+                elif self._utilityBinArrayLU[itemK] >= self._minUtil:
+                    if itemK in neighbourhoodList:
+                        newItemsToKeep.append(itemK)
+            self._backtrackingEFIM(transactionsPe, newItemsToKeep, newItemsToExplore, prefixLength + 1)
+            finalMemory = _ab._psutil.virtual_memory()[3]
+            memory = (finalMemory - initialMemory) / 10000
+            if self._maxMemory < memory:
+                self._maxMemory = memory
+
+    def _useUtilityBinArraysToCalculateUpperBounds(self, transactionsPe: List[_Transaction], j: int, itemsToKeep: List[int], neighbourhoodList: List[int]) -> None:
+        """
+        A method to calculate the subtree utility and local utility of all items that can extend itemSet P U {e}
+
+        :param transactionsPe: the transactions of the projected database for P U {e}
+        :type transactionsPe: list
+        :param j: the position of e in the list of promising items
+        :type j: int
+        :param itemsToKeep: the list of promising items
+        :type itemsToKeep: list
+        :param neighbourhoodList: the list of common neighbourhood items
+        :type neighbourhoodList: list
+        :return: None
+        """
+        for i in range(j + 1, len(itemsToKeep)):
+            item = itemsToKeep[i]
+            self._utilityBinArrayLU[item] = 0
+            self._utilityBinArraySU[item] = 0
+        for transaction in transactionsPe:
+            length = len(transaction.getItems())
+            i = length - 1
+            while i >= transaction.offset:
+                item = transaction.getItems()[i]
+                if item in itemsToKeep:
+                    remainingUtility = 0
+                    if self._newNamesToOldNames[item] in self._Neighbours:
+                        item_neighbours = self._Neighbours[self._newNamesToOldNames[item]]
+                        for k in range(i, length):
+                            transaction_item = transaction.getItems()[k]
+                            if self._newNamesToOldNames[transaction_item] in item_neighbours and transaction_item in neighbourhoodList:
+                                remainingUtility += transaction.getUtilities()[k]
+
+                    remainingUtility += transaction.getUtilities()[i]
+                    self._utilityBinArraySU[item] += remainingUtility + transaction.prefixUtility
+                    self._utilityBinArrayLU[item] += transaction.transactionUtility + transaction.prefixUtility
+                i -= 1
+
+    def _calculateNeighbourIntersection(self, prefixLength: int) -> List[int]:
+        """
+        A method to find the common neighbours of the items in the current prefix
+
+        :param prefixLength: the length of the prefix itemSet
+
+        :type prefixLength: int
+
+        :return: the common neighbours
+
+        :rtype: list
+        """
+        intersectionList = self._Neighbours.get(self._temp[0])
+        for i in range(1, prefixLength + 1):
+            intersectionList = self._intersection(self._Neighbours[self._temp[i]], intersectionList)
+        finalIntersectionList = []
+        if intersectionList is None:
+            return finalIntersectionList
+        for item in intersectionList:
+            if item in self._oldNamesToNewNames:
+                finalIntersectionList.append(self._oldNamesToNewNames[item])
+        return finalIntersectionList
+
+    def _output(self, tempPosition: int, utility: int) -> None:
+        """
+        A method to save a high-utility itemSet to file or memory, depending on what the user chose
+
+        :param tempPosition: position of the last item
+        :type tempPosition: int
+        :param utility: total utility of the itemSet
+        :type utility: int
+        :return: None
+        """
+        self._patternCount += 1
+        s1 = str()
+        for i in range(0, tempPosition + 1):
+            s1 += self._dataset.intToStr.get((self._temp[i]))
+            if i != tempPosition:
+                s1 += "\t"
+        self._finalPatterns[s1] = str(utility)
+
+    def _isEqual(self, transaction1: _Transaction, transaction2: _Transaction) -> bool:
+        """
+        A method to check whether two transactions are identical
+
+        :param transaction1: the first transaction
+        :type transaction1: Transaction
+        :param transaction2: the second transaction
+        :type transaction2: Transaction
+        :return: whether both are identical or not
+        :rtype: bool
+        """
+        length1 = len(transaction1.items) - transaction1.offset
+        length2 = len(transaction2.items) - transaction2.offset
+        if length1 != length2:
+            return False
+        position1 = transaction1.offset
+        position2 = transaction2.offset
+        while position1 < len(transaction1.items):
+            if transaction1.items[position1] != transaction2.items[position2]:
+                return False
+            position1 += 1
+            position2 += 1
+        return True
+
+    def _intersection(self, lst1: List[int], lst2: List[int]) -> List[int]:
+        """
+        A method that returns the intersection of two lists
+
+        :param lst1: items neighbouring item1
+        :type lst1: list
+        :param lst2: items neighbouring item2
+        :type lst2: list
+        :return: the intersection of the two lists
+        :rtype: list
+        """
+        temp = set(lst2)
+        lst3 = [value for value in lst1 if value in temp]
+        return lst3
+
+    def _useUtilityBinArrayToCalculateSubtreeUtilityFirstTime(self, dataset: _Dataset) -> None:
+        """
+        Scan the initial database to calculate the subtree utility of each item using a utility-bin array
+
+        :param dataset: the transaction database
+
+        :type dataset: Dataset
+
+        :return: None
+        """
+        for transaction in dataset.getTransactions():
+            items = transaction.getItems()
+            utilities = transaction.getUtilities()
+            for idx, item in enumerate(items):
+                if item not in self._utilityBinArraySU:
+                    self._utilityBinArraySU[item] = 0
+                if self._newNamesToOldNames[item] not in self._Neighbours:
+                    self._utilityBinArraySU[item] += utilities[idx]
+                    continue
+                i = idx + 1
+                sumSu = utilities[idx]
+                while i < len(items):
+                    if self._newNamesToOldNames[items[i]] in self._Neighbours[self._newNamesToOldNames[item]]:
+                        sumSu += utilities[i]
+                    i += 1
+                self._utilityBinArraySU[item] += sumSu
+
+    def _sortDatabase(self, transactions: List[_Transaction]) -> None:
+        """
+        A method to sort the transactions in the order of PMU (in place)
+
+        :param transactions: the list of transactions
+        :type transactions: list
+        :return: None
+        """
+        cmp_items = _cmpToKey(self._sortTransaction)
+        transactions.sort(key=cmp_items)
+
+    def _sortTransaction(self, trans1: _Transaction, trans2: _Transaction) -> int:
+        """
+        A comparator that orders two transactions by their suffixes, read right to left, used for the PMU-based database sort
+
+        :param trans1: the first transaction
+        :type trans1: Transaction
+        :param trans2: the second transaction
+        :type trans2: Transaction
+        :return: a negative, zero, or positive number, as in a standard comparator
+        :rtype: int
+        """
+        trans1_items = trans1.getItems()
+        trans2_items = trans2.getItems()
+        pos1 = len(trans1_items) - 1
+        pos2 = len(trans2_items) - 1
+        if len(trans1_items) < len(trans2_items):
+            while pos1 >= 0:
+                sub = trans2_items[pos2] - trans1_items[pos1]
+                if sub != 0:
+                    return sub
+                pos1 -= 1
+                pos2 -= 1
+            return -1
+        elif len(trans1_items) > len(trans2_items):
+            while pos2 >= 0:
+                sub = trans2_items[pos2] - trans1_items[pos1]
+                if sub != 0:
+                    return sub
+                pos1 -= 1
+                pos2 -= 1
+            return 1
+        else:
+            while pos2 >= 0:
+                sub = trans2_items[pos2] - trans1_items[pos1]
+                if sub != 0:
+                    return sub
+                pos1 -= 1
+                pos2 -= 1
+            return 0
+
+    def _useUtilityBinArrayToCalculateLocalUtilityFirstTime(self, dataset: _Dataset) -> None:
+        """
+        A method to scan the database using a utility-bin array to calculate the PMUs
+
+        :param dataset: the transaction database
+
+        :type dataset: database
+
+        :return: None
+        """
+        for transaction in dataset.getTransactions():
+            for idx, item in enumerate(transaction.getItems()):
+                if item in self._utilityBinArrayLU:
+                    self._utilityBinArrayLU[item] += transaction.getPmus()[idx]
+                else:
+                    self._utilityBinArrayLU[item] = transaction.getPmus()[idx]
+
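+    # A toy illustration (hypothetical values, not part of the module) of the
+    # utility-bin accumulation above: the local-utility bin of an item is the
+    # sum of its PMU values over all transactions that contain it.
+    #
+    #   PMUs per transaction: t1 = {a: 7, b: 5}, t2 = {b: 6, c: 4}
+    #   utilityBinArrayLU    = {a: 7, b: 5 + 6 = 11, c: 4}
+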
+    def getPatternsAsDataFrame(self) -> pd.DataFrame:
+        """
+        Storing the final patterns in a dataframe
+
+        :return: the patterns in a dataframe
+        :rtype: pd.DataFrame
+        """
+        data = []
+        for a, b in self._finalPatterns.items():
+            data.append([a.replace('\t', ' '), b])
+        dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Utility'])
+        return dataFrame
+
+    def getPatterns(self) -> Dict[str, str]:
+        """
+        Function to send the set of patterns after completion of the mining process
+
+        :return: the discovered patterns
+        :rtype: dict
+        """
+        return self._finalPatterns
+
+    def save(self, outFile: str) -> None:
+        """
+        Complete set of patterns will be loaded into an output file
+
+        :param outFile: name of the output file
+
+        :type outFile: str
+
+        :return: None
+        """
+        self._oFile = outFile
+        with open(self._oFile, 'w+') as writer:
+            for x, y in self._finalPatterns.items():
+                patternsAndSupport = x.strip() + ":" + str(y)
+                writer.write("%s \n" % patternsAndSupport)
+
+    def getMemoryUSS(self) -> float:
+        """
+        Total amount of USS memory consumed by the mining process will be retrieved from this function
+
+        :return: USS memory consumed by the mining process
+        :rtype: float
+        """
+        return self._memoryUSS
+
+    def getMemoryRSS(self) -> float:
+        """
+        Total amount of RSS memory consumed by the mining process will be retrieved from this function
+
+        :return: RSS memory consumed by the mining process
+        :rtype: float
+        """
+        return self._memoryRSS
+
+    def getRuntime(self) -> float:
+        """
+        Calculating the total amount of runtime taken by the mining process
+
+        :return: total amount of runtime taken by the mining process
+        :rtype: float
+        """
+        return self._endTime - self._startTime
+
+    def printResults(self) -> None:
+        """
+        This function is used to print the results
+        """
+        print("Total number of Spatial High Utility Patterns:", len(self.getPatterns()))
+        print("Total Memory in USS:", self.getMemoryUSS())
+        print("Total Memory in RSS", self.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+if __name__ == '__main__':
+    _ap = str()
+    if len(_ab._sys.argv) == 5 or len(_ab._sys.argv) == 6:
+        if len(_ab._sys.argv) == 6:
+            _ap = SHUIM(_ab._sys.argv[1], _ab._sys.argv[3], int(_ab._sys.argv[4]), _ab._sys.argv[5])
+        if len(_ab._sys.argv) == 5:
+            _ap = SHUIM(_ab._sys.argv[1], _ab._sys.argv[3], int(_ab._sys.argv[4]))
+        _ap.mine()
+        print("Total number of Spatial High Utility Patterns:", len(_ap.getPatterns()))
+        _ap.save(_ab._sys.argv[2])
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        for i in [100000, 500000]:
+            _ap = SHUIM('/Users/Likhitha/Downloads/mushroom_main_2000.txt', '/Users/Likhitha/Downloads/mushroom_neighbors_2000.txt', i, ' ')
+            _ap.mine()
+            print("Total number of Spatial High Utility Patterns:", len(_ap.getPatterns()))
+            #_ap.save(_ab._sys.argv[2])
+            print("Total Memory in USS:", _ap.getMemoryUSS())
+            print("Total Memory in RSS", _ap.getMemoryRSS())
+            print("Total ExecutionTime in seconds:", _ap.getRuntime())
+        print("Error! The number of input parameters does not match the total number of parameters provided")
\ No newline at end of file
diff --git a/sphinx/_build/html/_modules/PAMI/highUtilitySpatialPattern/topk/TKSHUIM.html b/sphinx/_build/html/_modules/PAMI/highUtilitySpatialPattern/topk/TKSHUIM.html
new file mode 100644
index 000000000..c47e02fb8
--- /dev/null
+++ b/sphinx/_build/html/_modules/PAMI/highUtilitySpatialPattern/topk/TKSHUIM.html
@@ -0,0 +1,1156 @@
+PAMI.highUtilitySpatialPattern.topk.TKSHUIM — PAMI 2024.04.23 documentation

Source code for PAMI.highUtilitySpatialPattern.topk.TKSHUIM

+# Top K Spatial High Utility ItemSet Mining (TKSHUIM) aims to discover Top-K Spatial High Utility Itemsets
+# (TKSHUIs) in a spatioTemporal database
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#
+#             from PAMI.highUtilitySpatialPattern.topk import TKSHUIM as alg
+#
+#             obj=alg.TKSHUIM("input.txt","Neighbours.txt",35)
+#
+#             obj.mine()
+#
+#             Patterns = obj.getPatterns()
+#
+#             print("Total number of Patterns:", len(Patterns))
+#
+#             obj.save("output")
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+from PAMI.highUtilitySpatialPattern.topk.abstract import *
+from functools import cmp_to_key
+import heapq
+from deprecated import deprecated
+
+
+class Transaction:
+    """
+    A class to store a Transaction of a database
+
+    :Attributes:
+
+        items: list
+            A list of items in the transaction
+        utilities: list
+            A list of utilities of the items in the transaction
+        transactionUtility: int
+            The total sum of all utilities in the transaction
+        pmus: list
+            The PMU (probable maximum utility) of each element in the transaction
+        prefixUtility:
+            The prefix utility value of the transaction
+        offset:
+            An offset pointer, used by projected transactions
+
+    :Methods:
+
+        projectTransaction(offsetE):
+            A method to create a new Transaction from the existing one, projected at offsetE
+        getItems():
+            return the items in the transaction
+        getUtilities():
+            return the utilities in the transaction
+        getPmus():
+            return the PMUs in the transaction
+        getLastPosition():
+            return the last position in the transaction
+        removeUnpromisingItems():
+            A method to remove items whose utility is lower than minUtil
+        insertionSort():
+            A method to sort all items in the transaction
+    """
+    offset = 0
+    prefixUtility = 0
+
+    def __init__(self, items, utilities, transactionUtility, pmus=None):
+        self.items = items
+        self.utilities = utilities
+        self.transactionUtility = transactionUtility
+        if pmus is not None:
+            self.pmus = pmus
+
+[docs] + def projectTransaction(self, offsetE): + """ + A method to create new Transaction from existing till offsetE + + :param offsetE: an offset over the original transaction for projecting the transaction + :type offsetE: int + """ + new_transaction = Transaction(self.items, self.utilities, self.transactionUtility) + utilityE = self.utilities[offsetE] + new_transaction.prefixUtility = self.prefixUtility + utilityE + new_transaction.transactionUtility = self.transactionUtility - utilityE + for i in range(self.offset, offsetE): + new_transaction.transactionUtility -= self.utilities[i] + new_transaction.offset = offsetE + 1 + return new_transaction
+ + +
+[docs] + def getItems(self): + """ + A method to return items in transaction + """ + return self.items
+ + +
+[docs] + def getPmus(self): + """ + A method to return pmus in transaction + """ + return self.pmus
+ + +
+[docs] + def getUtilities(self): + """ + A method to return utilities in transaction + """ + return self.utilities
+ + + # get the last position in this transaction +
+[docs] + def getLastPosition(self): + """ + A method to return last position in a transaction + """ + return len(self.items) - 1
+ + +
+    def removeUnpromisingItems(self, oldNamesToNewNames):
+        """
+        A method to remove unpromising items (those whose utility is lower than minUtil) and rename the remaining ones
+
+        :param oldNamesToNewNames: a map from old item names to new item names
+        :type oldNamesToNewNames: map
+        """
+        tempItems = []
+        tempUtilities = []
+        for idx, item in enumerate(self.items):
+            if item in oldNamesToNewNames:
+                tempItems.append(oldNamesToNewNames[item])
+                tempUtilities.append(self.utilities[idx])
+            else:
+                self.transactionUtility -= self.utilities[idx]
+        self.items = tempItems
+        self.utilities = tempUtilities
+        self.insertionSort()
+ + +
+[docs] + def insertionSort(self): + """ + A method to sort items in order + """ + for i in range(1, len(self.items)): + key = self.items[i] + utilityJ = self.utilities[i] + j = i - 1 + while j >= 0 and key < self.items[j]: + self.items[j + 1] = self.items[j] + self.utilities[j + 1] = self.utilities[j] + j -= 1 + self.items[j + 1] = key + self.utilities[j + 1] = utilityJ
+
+ + + +
+class Dataset:
+    """
+    A class representing the list of transactions in this dataset
+
+    :Attributes:
+
+        transactions:
+            the list of transactions in this dataset
+        maxItem:
+            the largest item name
+
+    :methods:
+
+        createTransaction(line):
+            Create a transaction object from a line of the input file
+        getMaxItem():
+            return the maximum item
+        getTransactions():
+            return the transactions in the database
+
+    """
+    transactions = []
+    maxItem = 0
+
+    def __init__(self, datasetpath, sep):
+        self.strToint = {}
+        self.intTostr = {}
+        self.cnt = 1
+        self.sep = sep
+        # reset per instance; without this, the class-level list would be
+        # shared across Dataset objects
+        self.transactions = []
+        with open(datasetpath, 'r') as f:
+            lines = f.readlines()
+            for line in lines:
+                self.transactions.append(self.createTransaction(line))
+
+    def createTransaction(self, line):
+        """
+        A method to create a Transaction from a given line of the dataset
+
+        :param line: a single line of the database
+        :type line: string
+        :return: the parsed transaction
+        :rtype: Transaction
+        """
+        trans_list = line.strip().split(':')
+        transactionUtility = int(trans_list[1])
+        itemsString = trans_list[0].strip().split(self.sep)
+        utilityString = trans_list[2].strip().split(self.sep)
+        if len(trans_list) == 4:
+            pmuString = trans_list[3].strip().split(self.sep)
+        items = []
+        utilities = []
+        pmus = []
+        for idx, item in enumerate(itemsString):
+            if self.strToint.get(item) is None:
+                self.strToint[item] = self.cnt
+                self.intTostr[self.cnt] = item
+                self.cnt += 1
+            item_int = self.strToint.get(item)
+            if item_int > self.maxItem:
+                self.maxItem = item_int
+            items.append(item_int)
+            utilities.append(int(utilityString[idx]))
+            if len(trans_list) == 4:
+                pmus.append(int(pmuString[idx]))
+        return Transaction(items, utilities, transactionUtility, pmus)
+ + +
+[docs] + def getMaxItem(self): + """ + A method to return name of the largest item + """ + return self.maxItem
+ + +
+[docs] + def getTransactions(self): + """ + A method to return transactions from database + """ + return self.transactions
+
+ + + +
+[docs] +class TKSHUIM(utilityPatterns): + """ + :Description: + Top K Spatial High Utility ItemSet Mining (TKSHUIM) aims to discover Top-K Spatial High Utility Itemsets + (TKSHUIs) in a spatioTemporal database + + :Reference: + + P. Pallikila et al., "Discovering Top-k Spatial High Utility Itemsets in Very Large Quantitative Spatiotemporal + databases," 2021 IEEE International Conference on Big Data (Big Data), Orlando, FL, USA, 2021, pp. 4925-4935, + doi: 10.1109/BigData52589.2021.9671912. + + :param iFile: str : + Name of the Input file to mine complete set of High Utility Spatial patterns + :param oFile: str : + Name of the output file to store complete set of High Utility Spatial patterns + :param minUtil: int : + Minimum utility threshold given by User + :param maxMemory: int : + Maximum memory used by this program for running + :param candidateCount: int : + Number of candidates to consider when calculating a high utility spatial pattern + :param nFile: str : + Name of the input file to mine complete set of High Utility Spatial patterns + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + + :Attributes: + + iFile : file + Name of the input file to mine complete set of frequent patterns + nFile : file + Name of the Neighbours file that contain neighbours of items + oFile : file + Name of the output file to store complete set of frequent patterns + memoryRSS : float + To store the total amount of RSS memory consumed by the program + startTime:float + To record the start time of the mining process + endTime:float + To record the completion time of the mining process + k : int + The user given k value + candidateCount: int + Number of candidates + utilityBinArrayLU: list + A map to hold the pmu values of the items in database + utilityBinArraySU: list + A map to hold the subtree utility values of the items is database + oldNamesToNewNames: list + A map to hold the subtree utility values of the items is database + newNamesToOldNames: list + A map to store the old name corresponding to new name + Neighbours : map + A dictionary to store the neighbours of a item + maxMemory: float + Maximum memory used by this program for running + itemsToKeep: list + keep only the promising items ie items having twu >= minUtil + itemsToExplore: list + keep items that subtreeUtility grater than minUtil + + :Methods: + + mine() + Mining process will start from here + getPatterns() + Complete set of patterns will be retrieved with this function + save(oFile) + Complete set of patterns will be loaded in to a output file + getPatternsAsDataFrame() + Complete set of patterns will be loaded in to a dataframe + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function + calculateNeighbourIntersection(self, prefixLength) + A method to return common Neighbours of items + backtrackingEFIM(transactionsOfP, itemsToKeep, itemsToExplore, prefixLength) + A method to mine the TKSHUIs Recursively + useUtilityBinArraysToCalculateUpperBounds(transactionsPe, j, itemsToKeep, neighbourhoodList) + A method to calculate the sub-tree utility and local utility of all items that can extend itemSet P and e + 
output(tempPosition, utility) + A method ave a high-utility itemSet to file or memory depending on what the user chose + is_equal(transaction1, transaction2) + A method to Check if two transaction are identical + intersection(lst1, lst2) + A method that return the intersection of 2 list + useUtilityBinArrayToCalculateSubtreeUtilityFirstTime(dataset) + Scan the initial database to calculate the subtree utility of each items using a utility-bin array + sortDatabase(self, transactions) + A Method to sort transaction in the order of PMU + sort_transaction(self, trans1, trans2) + A Method to sort transaction in the order of PMU + useUtilityBinArrayToCalculateLocalUtilityFirstTime(self, dataset) + A method to scan the database using utility bin array to calculate the pmus + + **Executing the code on terminal:** + ------------------------------------- + + .. code-block:: console + + Format: + + (.venv) $ python3 TKSHUIM.py <inputFile> <outputFile> <Neighbours> <k> <sep> + + Example Usage: + + (.venv) $ python3 TKSHUIM.py sampleTDB.txt output.txt sampleN.txt 35 + + .. note:: maxMemory will be considered as Maximum memory used by this program for running + + + **Sample run of importing the code:** + ---------------------------------------- + .. code-block:: python + + from PAMI.highUtilitySpatialPattern.topk import TKSHUIM as alg + + obj=alg.TKSHUIM("input.txt","Neighbours.txt",35) + + obj.mine() + + Patterns = obj.getPatterns() + + obj.save("output") + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + **Credits:** + --------------- + The complete program was written by Pradeep Pallikila under the supervision of Professor Rage Uday Kiran. + """ + candidateCount = 0 + utilityBinArrayLU = {} + utilityBinArraySU = {} + oldNamesToNewNames = {} + newNamesToOldNames = {} + strToint = {} + intTostr = {} + Neighbours = {} + temp = [0] * 5000 + maxMemory = 0 + startTime = float() + endTime = float() + finalPatterns = {} + iFile = " " + oFile = " " + nFile = " " + sep = "\t" + minUtil = 0 + memoryUSS = float() + memoryRSS = float() + heapList = [] + + def __init__(self, iFile, nFile, k, sep="\t"): + super().__init__(iFile, nFile, k, sep) + +
+    @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.")
+    def startMine(self):
+        """
+        Main function of the program.
+        """
+        self.mine()
+ + +
+[docs] + def mine(self): + """ + Main function of the program. + """ + self.startTime = time.time() + self.finalPatterns = {} + self.dataset = Dataset(self.iFile, self.sep) + with open(self.nFile, 'r') as o: + lines = o.readlines() + for line in lines: + line = line.split("\n")[0] + line_split = line.split(self.sep) + item = self.dataset.strToint.get(line_split[0]) + lst = [] + for i in range(1, len(line_split)): + lst.append(self.dataset.strToint.get(line_split[i])) + self.Neighbours[item] = lst + o.close() + InitialMemory = psutil.virtual_memory()[3] + self.useUtilityBinArrayToCalculateLocalUtilityFirstTime(self.dataset) + itemsToKeep = [] + for key in self.utilityBinArrayLU.keys(): + if self.utilityBinArrayLU[key] >= self.minUtil: + itemsToKeep.append(key) + itemsToKeep = sorted(itemsToKeep, key=lambda x: self.utilityBinArrayLU[x]) + currentName = 1 + for idx, item in enumerate(itemsToKeep): + self.oldNamesToNewNames[item] = currentName + self.newNamesToOldNames[currentName] = item + itemsToKeep[idx] = currentName + currentName += 1 + for transaction in self.dataset.getTransactions(): + transaction.removeUnpromisingItems(self.oldNamesToNewNames) + self.sortDatabase(self.dataset.getTransactions()) + emptyTransactionCount = 0 + for transaction in self.dataset.getTransactions(): + if len(transaction.getItems()) == 0: + emptyTransactionCount += 1 + self.dataset.transactions = self.dataset.transactions[emptyTransactionCount:] + self.useUtilityBinArrayToCalculateSubtreeUtilityFirstTime(self.dataset) + self.heapList = [] + itemsToExplore = [] + for item in itemsToKeep: + if self.utilityBinArraySU[item] >= self.minUtil: + itemsToExplore.append(item) + commonitems = [] + for i in range(self.dataset.maxItem): + commonitems.append(i) + self.backtrackingEFIM(self.dataset.getTransactions(), itemsToKeep, itemsToExplore, 0) + finalMemory = psutil.virtual_memory()[3] + memory = (finalMemory - InitialMemory) / 10000 + if memory > self.maxMemory: + self.maxMemory = memory + self.endTime = time.time() + process = psutil.Process(os.getpid()) + self.memoryUSS = float() + self.memoryRSS = float() + self.memoryUSS = process.memory_full_info().uss + self.memoryRSS = process.memory_info().rss + for item in self.heapList: + self.finalPatterns[item[1]] = item[0] + print('TOP-K mining process is completed by TKSHUIM')
+ + +
+[docs] + def backtrackingEFIM(self, transactionsOfP, itemsToKeep, itemsToExplore, prefixLength): + """ + A method to mine the TKSHUIs Recursively + + :param transactionsOfP: the list of transactions containing the current prefix P + :type transactionsOfP: list + :param itemsToKeep: the list of secondary items in the p-projected database + :type itemsToKeep: list + :param itemsToExplore: the list of primary items in the p-projected database + :type itemsToExplore: list + :param prefixLength: current prefixLength + :type prefixLength: int + """ + self.candidateCount += len(itemsToExplore) + for idx, e in enumerate(itemsToExplore): + initialMemory = psutil.virtual_memory()[3] + transactionsPe = [] + utilityPe = 0 + if len(transactionsOfP) == 0: + break + previousTransaction = transactionsOfP[0] + consecutiveMergeCount = 0 + for transaction in transactionsOfP: + items = transaction.getItems() + if e in items: + positionE = items.index(e) + if transaction.getLastPosition() == positionE: + utilityPe += transaction.getUtilities()[positionE] + transaction.prefixUtility + else: + projectedTransaction = transaction.projectTransaction(positionE) + utilityPe += projectedTransaction.prefixUtility + if previousTransaction == transactionsOfP[0]: + previousTransaction = projectedTransaction + elif self.is_equal(projectedTransaction, previousTransaction): + if consecutiveMergeCount == 0: + items = previousTransaction.items[previousTransaction.offset:] + utilities = previousTransaction.utilities[previousTransaction.offset:] + itemsCount = len(items) + positionPrevious = 0 + positionProjection = projectedTransaction.offset + while positionPrevious < itemsCount: + utilities[positionPrevious] += projectedTransaction.utilities[positionProjection] + positionPrevious += 1 + positionProjection += 1 + previousTransaction.prefixUtility += projectedTransaction.prefixUtility + sumUtilities = previousTransaction.prefixUtility + previousTransaction = Transaction(items, utilities, previousTransaction.transactionUtility + projectedTransaction.transactionUtility) + previousTransaction.prefixUtility = sumUtilities + else: + positionPrevious = 0 + positionProjected = projectedTransaction.offset + itemsCount = len(previousTransaction.items) + while positionPrevious < itemsCount: + previousTransaction.utilities[positionPrevious] += projectedTransaction.utilities[ + positionProjected] + positionPrevious += 1 + positionProjected += 1 + previousTransaction.transactionUtility += projectedTransaction.transactionUtility + previousTransaction.prefixUtility += projectedTransaction.prefixUtility + consecutiveMergeCount += 1 + else: + transactionsPe.append(previousTransaction) + previousTransaction = projectedTransaction + consecutiveMergeCount = 0 + transaction.offset = positionE + if previousTransaction != transactionsOfP[0]: + transactionsPe.append(previousTransaction) + self.temp[prefixLength] = self.newNamesToOldNames[e] + if utilityPe >= self.minUtil: + self.output(prefixLength, utilityPe) + neighbourhoodList = self.calculateNeighbourIntersection(prefixLength) + self.useUtilityBinArraysToCalculateUpperBounds(transactionsPe, idx, itemsToKeep, neighbourhoodList) + newItemsToKeep = [] + newItemsToExplore = [] + for l in range(idx + 1, len(itemsToKeep)): + itemK = itemsToKeep[l] + if self.utilityBinArraySU[itemK] >= self.minUtil: + if itemK in neighbourhoodList: + newItemsToExplore.append(itemK) + newItemsToKeep.append(itemK) + elif self.utilityBinArrayLU[itemK] >= self.minUtil: + if itemK in neighbourhoodList: + 
newItemsToKeep.append(itemK) + self.backtrackingEFIM(transactionsPe, newItemsToKeep, newItemsToExplore, prefixLength + 1) + finalMemory = psutil.virtual_memory()[3] + memory = (finalMemory - initialMemory) / 10000 + if self.maxMemory < memory: + self.maxMemory = memory
+ + +
+[docs] + def useUtilityBinArraysToCalculateUpperBounds(self, transactionsPe, j, itemsToKeep, neighbourhoodList): + """ + A method to calculate the sub-tree utility and local utility of all items that can extend itemSet P U {e} + + :param transactionsPe: transactions the projected database for P U {e} + :type transactionsPe: list + :param j:the position of j in the list of promising items + :type j:int + :param itemsToKeep :the list of promising items + :type itemsToKeep: list + :param neighbourhoodList: list of neighbourhood elements + :type neighbourhoodList: list + """ + for i in range(j + 1, len(itemsToKeep)): + item = itemsToKeep[i] + self.utilityBinArrayLU[item] = 0 + self.utilityBinArraySU[item] = 0 + for transaction in transactionsPe: + length = len(transaction.getItems()) + i = length - 1 + while i >= transaction.offset: + item = transaction.getItems()[i] + if item in itemsToKeep: + remainingUtility = 0 + if self.newNamesToOldNames[item] in self.Neighbours: + item_neighbours = self.Neighbours[self.newNamesToOldNames[item]] + for k in range(i, length): + transaction_item = transaction.getItems()[k] + if self.newNamesToOldNames[transaction_item] in item_neighbours and transaction_item in neighbourhoodList: + remainingUtility += transaction.getUtilities()[k] + + remainingUtility += transaction.getUtilities()[i] + self.utilityBinArraySU[item] += remainingUtility + transaction.prefixUtility + self.utilityBinArrayLU[item] += transaction.transactionUtility + transaction.prefixUtility + i -= 1
+ + +
+[docs] + def calculateNeighbourIntersection(self, prefixLength): + """ + A method to find common Neighbours + + :param prefixLength: the prefix itemSet + :type prefixLength:int + """ + intersectionList = self.Neighbours.get(self.temp[0]) + for i in range(1, prefixLength+1): + intersectionList = self.intersection(self.Neighbours[self.temp[i]], intersectionList) + finalIntersectionList = [] + if intersectionList is None: + return finalIntersectionList + for item in intersectionList: + if item in self.oldNamesToNewNames: + finalIntersectionList.append(self.oldNamesToNewNames[item]) + return finalIntersectionList
+ + +
+[docs] + def output(self, tempPosition, utility): + """ + A method save all high-utility itemSet to file or memory depending on what the user chose + + :param tempPosition: position of last item + :type tempPosition : int + :param utility: total utility of itemSet + :type utility: int + """ + s1 = str() + for i in range(0, tempPosition+1): + s1 += self.dataset.intTostr.get((self.temp[i])) + if i != tempPosition: + s1 += "\t" + self.additemset(s1, utility)
+ + +
+[docs] + def is_equal(self, transaction1, transaction2): + """ + A method to Check if two transaction are identical + + :param transaction1: the first transaction. + :type transaction1: Transaction + :param transaction2: the second transaction. + :type transaction2: Transaction + :return : whether both are identical or not + :rtype: bool + """ + + length1 = len(transaction1.items) - transaction1.offset + length2 = len(transaction2.items) - transaction2.offset + if length1 != length2: + return False + position1 = transaction1.offset + position2 = transaction2.offset + while position1 < len(transaction1.items): + if transaction1.items[position1] != transaction2.items[position2]: + return False + position1 += 1 + position2 += 1 + return True
+ + +
+[docs] + def intersection(self, lst1, lst2): + """ + A method that return the intersection of 2 list + + :param lst1: items neighbour to item1 + :type lst1: list + :param lst2: items neighbour to item2 + :type lst2: list + :return :intersection of two lists + :rtype : list + """ + temp = set(lst2) + lst3 = [value for value in lst1 if value in temp] + return lst3
+ + +
+[docs] + def useUtilityBinArrayToCalculateSubtreeUtilityFirstTime(self, dataset): + """ + Scan the initial database to calculate the subtree utility of each item using a utility-bin array + + :param dataset: the transaction database + :type dataset: Dataset + """ + for transaction in dataset.getTransactions(): + items = transaction.getItems() + utilities = transaction.getUtilities() + for idx, item in enumerate(items): + if item not in self.utilityBinArraySU: + self.utilityBinArraySU[item] = 0 + if self.newNamesToOldNames[item] not in self.Neighbours: + self.utilityBinArraySU[item] += utilities[idx] + continue + i = idx + 1 + sumSu = utilities[idx] + while i < len(items): + if self.newNamesToOldNames[items[i]] in self.Neighbours[self.newNamesToOldNames[item]]: + sumSu += utilities[i] + i += 1 + self.utilityBinArraySU[item] += sumSu
+ + +
+[docs] + def sortDatabase(self, transactions): + """ + A Method to sort transaction in the order of PMU + + :param transactions: transaction of items + :type transactions: Transaction + :return: sorted transaction + :rtype: Transaction + """ + cmp_items = cmp_to_key(self.sort_transaction) + transactions.sort(key=cmp_items)
+ + +
+[docs] + def sort_transaction(self, trans1, trans2): + """ + A Method to sort transaction in the order of PMU + + :param trans1: the first transaction. + :type trans1: Transaction + :param trans2:the second transaction. + :type trans2: Transaction + :return: sorted transaction. + :rtype: int + """ + trans1_items = trans1.getItems() + trans2_items = trans2.getItems() + pos1 = len(trans1_items) - 1 + pos2 = len(trans2_items) - 1 + if len(trans1_items) < len(trans2_items): + while pos1 >= 0: + sub = trans2_items[pos2] - trans1_items[pos1] + if sub != 0: + return sub + pos1 -= 1 + pos2 -= 1 + return -1 + elif len(trans1_items) > len(trans2_items): + while pos2 >= 0: + sub = trans2_items[pos2] - trans1_items[pos1] + if sub != 0: + return sub + pos1 -= 1 + pos2 -= 1 + return 1 + else: + while pos2 >= 0: + sub = trans2_items[pos2] - trans1_items[pos1] + if sub != 0: + return sub + pos1 -= 1 + pos2 -= 1 + return 0
+ + +
+    def useUtilityBinArrayToCalculateLocalUtilityFirstTime(self, dataset):
+        """
+        A method to scan the database using a utility-bin array to calculate the PMUs
+
+        :param dataset: the transaction database
+        :type dataset: database
+        """
+        utilityMatrix = defaultdict(lambda: defaultdict(int))
+        for transaction in dataset.getTransactions():
+            for idx, item in enumerate(transaction.getItems()):
+                pmu = transaction.getUtilities()[idx]
+                if item in self.Neighbours:
+                    neighbors = self.Neighbours[item]
+                    # distinct loop variables are used here so the outer idx/item
+                    # are not clobbered while summing the neighbour utilities
+                    for nIdx, nItem in enumerate(transaction.getItems()):
+                        if nItem in neighbors:
+                            pmu += transaction.getUtilities()[nIdx]
+                if item in self.utilityBinArrayLU:
+                    self.utilityBinArrayLU[item] += pmu
+                else:
+                    self.utilityBinArrayLU[item] = pmu
+                utilityMatrix[item][item] += transaction.getUtilities()[idx]
+                if item in self.Neighbours:
+                    neighbors = self.Neighbours[item]
+                    utility = transaction.getUtilities()[idx]
+                    for i, itemj in enumerate(transaction.getItems()):
+                        if (itemj != item) and (itemj in neighbors):
+                            utilityMatrix[item][itemj] += (utility + transaction.getUtilities()[i])
+
+        for item in utilityMatrix.keys():
+            for itemj in utilityMatrix[item].keys():
+                if itemj >= item:
+                    val = utilityMatrix[item][itemj]
+                    if val != 0 and val > self.minUtil:
+                        if itemj == item:
+                            itemset = str(item)
+                        else:
+                            itemset = str(item) + str(itemj)
+                        self.additemset(itemset, val)
+ + +
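+    # A toy walk-through (hypothetical numbers, not part of the module) of the
+    # pairwise seeding above: utilityMatrix[i][j] accumulates u(i) + u(j) over
+    # the transactions in which neighbours i and j co-occur, so promising 1- and
+    # 2-itemsets can be pushed into the top-k heap before the recursive search:
+    #
+    #   transaction with items (1, 2), utilities (5, 4), 1 and 2 mutual neighbours
+    #   utilityMatrix[1][1] += 5; utilityMatrix[2][2] += 4
+    #   utilityMatrix[1][2] += 5 + 4   # candidate 2-itemset {1, 2}, utility 9
+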
+    def additemset(self, itemset, utility):
+        """
+        Adds the itemset to the top-k priority queue (a min-heap), evicting the
+        smallest entries and raising minUtil once more than k patterns are held
+
+        :param itemset: the itemset to be added
+
+        :type itemset: str
+
+        :param utility: the utility of the itemset to be added
+
+        :type utility: int
+        """
+        heapq.heappush(self.heapList, (utility, itemset))
+        if len(self.heapList) > self.k:
+            while len(self.heapList) > self.k:
+                heapq.heappop(self.heapList)
+                if len(self.heapList) == 0:
+                    break
+            self.minUtil = heapq.nsmallest(1, self.heapList)[0][0]
+ + +
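+    # A minimal sketch (toy values, hypothetical variable names) of the top-k
+    # maintenance above: a min-heap of (utility, itemset) pairs keeps the k best
+    # patterns seen so far, and minUtil is raised to the smallest retained
+    # utility so that weaker candidates are pruned earlier.
+    #
+    #   import heapq
+    #   k, heap = 2, []
+    #   for entry in [(40, 'a'), (90, 'a b'), (75, 'b')]:
+    #       heapq.heappush(heap, entry)
+    #       while len(heap) > k:
+    #           heapq.heappop(heap)
+    #   assert heap[0][0] == 75   # new minUtil; the 40-utility pattern was evicted
+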
+[docs] + def getPatternsAsDataFrame(self): + """ + Storing final patterns in a dataframe + + :return: returning patterns in a dataframe + :rtype: pd.DataFrame + """ + dataFrame = {} + data = [] + for a, b in self.finalPatterns.items(): + data.append([a.replace('\t', ' '), b]) + dataFrame = pd.DataFrame(data, columns=['Patterns', 'Utility']) + + return dataFrame
+ + +
+[docs] + def getPatterns(self): + """ + Function to send the set of patterns after completion of the mining process + + :return: returning patterns + :rtype: dict + """ + return self.finalPatterns
+ + +
+[docs] + def save(self, outFile): + """ + Complete set of patterns will be loaded in to an output file + + :param outFile: name of the output file + :type outFile: csv file + """ + self.oFile = outFile + writer = open(self.oFile, 'w+') + for x, y in self.finalPatterns.items(): + patternsAndSupport = x.strip() + ":" + str(y) + writer.write("%s \n" % patternsAndSupport)
+ + +
+[docs] + def getMemoryUSS(self): + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self.memoryUSS
+ + +
+[docs] + def getMemoryRSS(self): + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + return self.memoryRSS
+ + +
+[docs] + def getRuntime(self): + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + return self.endTime-self.startTime
+ + +
+[docs] + def printResults(self): + """ + This function is used to print the results + """ + print("Top K Spatial High Utility Patterns:", len(self.getPatterns())) + print("Total Memory in USS:", self.getMemoryUSS()) + print("Total Memory in RSS", self.getMemoryRSS()) + print("Total ExecutionTime in seconds:", self.getRuntime())
+
+ + +
+def main():
+    inputFile = 'mushroom_utility_spmf.txt'
+    neighborFile = 'mushroom_neighbourhood.txt'
+    k = 1000
+    separator = ' '
+    obj = TKSHUIM(iFile=inputFile, nFile=neighborFile, k=k, sep=separator)  # initialize
+    obj.mine()
+    obj.printResults()
+    print(obj.getPatterns())
+ + +if __name__ == '__main__': + main() + # _ap = str() + # if len(sys.argv) == 5 or len(sys.argv) == 6: + # if len(sys.argv) == 6: + # _ap = TKSHUIM(sys.argv[1], sys.argv[3], int(sys.argv[4]), sys.argv[5]) + # if len(sys.argv) == 5: + # _ap = TKSHUIM(sys.argv[1], sys.argv[3], int(sys.argv[4])) + # _ap.startMine() + # _ap.mine() + # print("Top K Spatial High Utility Patterns:", len(_ap.getPatterns())) + # _ap.save(sys.argv[2]) + # print("Total Memory in USS:", _ap.getMemoryUSS()) + # print("Total Memory in RSS", _ap.getMemoryRSS()) + # print("Total ExecutionTime in seconds:", _ap.getRuntime()) + # else: + # for i in [1000, 5000]: + # _ap = TKSHUIM('/Users/Likhitha/Downloads/mushroom_main_2000.txt', + # '/Users/Likhitha/Downloads/mushroom_neighbors_2000.txt', i, ' ') + # _ap.startMine() + # _ap.mine() + # print("Total number of Spatial High Utility Patterns:", len(_ap.getPatterns())) + # print("Total Memory in USS:", _ap.getMemoryUSS()) + # print("Total Memory in RSS", _ap.getMemoryRSS()) + # print("Total ExecutionTime in seconds:", _ap.getRuntime()) + # print("Error! The number of input parameters do not match the total number of parameters provided") + +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/highUtilitySpatialPattern/topk/abstract.html b/sphinx/_build/html/_modules/PAMI/highUtilitySpatialPattern/topk/abstract.html new file mode 100644 index 000000000..a54e440cb --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/highUtilitySpatialPattern/topk/abstract.html @@ -0,0 +1,342 @@ + + + + + + PAMI.highUtilitySpatialPattern.topk.abstract — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.highUtilitySpatialPattern.topk.abstract

+#  Copyright (C)  2021 Rage Uday Kiran
+#
+#      This program is free software: you can redistribute it and/or modify
+#      it under the terms of the GNU General Public License as published by
+#      the Free Software Foundation, either version 3 of the License, or
+#      (at your option) any later version.
+#
+#      This program is distributed in the hope that it will be useful,
+#      but WITHOUT ANY WARRANTY; without even the implied warranty of
+#      MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#      GNU General Public License for more details.
+#
+#      You should have received a copy of the GNU General Public License
+#      along with this program.  If not, see <https://www.gnu.org/licenses/>.
+#
+
+from abc import ABC, abstractmethod
+import time
+import validators
+from urllib.request import urlopen
+import csv
+import pandas as pd
+from collections import defaultdict
+from itertools import combinations as c
+import os
+import os.path
+import psutil
+import sys
+
+
+
+ +[docs] +class utilityPatterns(ABC): + """ + :Description: This abstract base class defines the variables and methods that every top-k spatial high utility pattern mining algorithm must + employ in PAMI + + :Attributes: + + iFile : str + Input file name or path of the input file + k: integer + The user can specify k (top-k) + sep : str + This variable is used to distinguish items from one another in a transaction. The default separator is the tab space ('\t'). + However, users can override the default separator + startTime:float + To record the start time of the algorithm + endTime:float + To record the completion time of the algorithm + finalPatterns: dict + Storing the complete set of patterns in a dictionary variable + oFile : str + Name of the output file to store the complete set of patterns + memoryUSS : float + To store the total amount of USS memory consumed by the program + memoryRSS : float + To store the total amount of RSS memory consumed by the program + + :Methods: + + startMine() + Calling this function will start the actual mining process + getPatterns() + This function will output all interesting patterns discovered by an algorithm + save(oFile) + This function will store the discovered patterns in an output file specified by the user + getPatternsAsDataFrame() + The function outputs the patterns generated by an algorithm as a data frame + getMemoryUSS() + This function outputs the total amount of USS memory consumed by a mining algorithm + getMemoryRSS() + This function outputs the total amount of RSS memory consumed by a mining algorithm + getRuntime() + This function outputs the total runtime of a mining algorithm + """ + + def __init__(self, iFile, nFile, k, sep="\t"): + """ + :param iFile: Input file name or path of the input file + :type iFile: str + :param nFile: Input file name or path of the neighbourhood file + :type nFile: str + :param k: The user can specify k in count + :type k: int + :param sep: separator used to distinguish items from each other. The default separator is tab space. However, users can override the default separator + :type sep: str + """ + + self.iFile = iFile + self.sep = sep + self.nFile = nFile + self.k = k +
+[docs] + @abstractmethod + def iFile(self): + """Variable to store the input file path/file name""" + pass
+ + +
+[docs] + @abstractmethod + def nFile(self): + """Variable to store the neighbourhood file path/file name""" + pass
+ + +
+[docs] + @abstractmethod + def startTime(self): + """Variable to store the start time of the mining process""" + pass
+ + +
+[docs] + @abstractmethod + def endTime(self): + """Variable to store the end time of the complete program""" + pass
+ + +
+[docs] + @abstractmethod + def memoryUSS(self): + """Variable to store USS memory consumed by the program""" + pass
+ + +
+[docs] + @abstractmethod + def memoryRSS(self): + """Variable to store RSS memory consumed by the program""" + pass
+ + +
+[docs] + @abstractmethod + def finalPatterns(self): + """Variable to store the complete set of patterns in a dictionary""" + pass
+ + +
+[docs] + @abstractmethod + def oFile(self): + """Variable to store the name of the output file to store the complete set of frequent patterns""" + pass
+ + +
+[docs] + @abstractmethod + def startMine(self): + """Code for the mining process will start from this function""" + pass
+ + +
+[docs] + @abstractmethod + def getPatterns(self): + """Complete set of patterns generated will be retrieved from this function""" + pass
+ + +
+[docs] + @abstractmethod + def save(self, oFile): + """Complete set of patterns will be saved into an output file from this function + + :param oFile: Name of the output file + :type oFile: csv file + """ + pass
+ + +
+[docs] + @abstractmethod + def getPatternsAsDataFrame(self): + """Complete set of generated patterns will be loaded into a data frame from this function""" + pass
+ + +
+[docs] + @abstractmethod + def getMemoryUSS(self): + """Total amount of USS memory consumed by the program will be retrieved from this function""" + pass
+ + +
+[docs] + @abstractmethod + def getMemoryRSS(self): + """Total amount of RSS memory consumed by the program will be retrieved from this function""" + pass
+ + + +
+[docs] + @abstractmethod + def getRuntime(self): + """Total amount of runtime taken by the program will be retrieved from this function""" + pass
+ + +
+[docs] + @abstractmethod + def printResults(self): + """ To print all the results of execution""" + + pass
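+
+# A minimal sketch (hypothetical class, not part of PAMI) of a concrete
+# algorithm built on this interface: ABCMeta only requires that every
+# abstract name be overridden, so plain attributes satisfy the variable-like
+# abstract members.
+class _DemoTopK(utilityPatterns):
+    iFile = nFile = oFile = None
+    startTime = endTime = memoryUSS = memoryRSS = 0.0
+    finalPatterns = {}
+
+    def startMine(self):
+        self.startTime = time.time()
+        self.finalPatterns = {'a\tb': 12.0}  # stand-in result for the sketch
+        self.endTime = time.time()
+
+    def getPatterns(self):
+        return self.finalPatterns
+
+    def save(self, oFile):
+        with open(oFile, 'w') as writer:
+            for x, y in self.finalPatterns.items():
+                writer.write("%s:%s\n" % (x.strip(), y))
+
+    def getPatternsAsDataFrame(self):
+        return pd.DataFrame(list(self.finalPatterns.items()), columns=['Patterns', 'Utility'])
+
+    def getMemoryUSS(self):
+        return self.memoryUSS
+
+    def getMemoryRSS(self):
+        return self.memoryRSS
+
+    def getRuntime(self):
+        return self.endTime - self.startTime
+
+    def printResults(self):
+        print("Patterns:", len(self.getPatterns()))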
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/localPeriodicPattern/basic/LPPGrowth.html b/sphinx/_build/html/_modules/PAMI/localPeriodicPattern/basic/LPPGrowth.html new file mode 100644 index 000000000..53bb34952 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/localPeriodicPattern/basic/LPPGrowth.html @@ -0,0 +1,1044 @@ + + + + + + PAMI.localPeriodicPattern.basic.LPPGrowth — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.localPeriodicPattern.basic.LPPGrowth

+# Local Periodic Patterns, which are patterns (sets of events) that have a periodic behavior in some non-predefined
+# time-intervals. A pattern is said to be a local periodic pattern if it appears regularly and continuously in some
+# time-intervals. The maxSoPer (maximal period of spillovers) measure allows detecting time-intervals of variable
+# lengths where a pattern is continuously periodic, while the minDur (minimal duration) measure ensures that those
+# time-intervals have a minimum duration.
+#
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#             from PAMI.localPeriodicPattern.basic import LPPGrowth as alg
+#
+#             obj = alg.LPPGrowth(iFile, maxPer, maxSoPer, minDur)
+#
+#             obj.mine()
+#
+#             localPeriodicPatterns = obj.getPatterns()
+#
+#             print(f'Total number of local periodic patterns: {len(localPeriodicPatterns)}')
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print(f'Total memory in USS: {memUSS}')
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print(f'Total memory in RSS: {memRSS}')
+#
+#             runtime = obj.getRuntime()
+#
+#             print(f'Total execution time in seconds: {runtime}')
+#
+
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+
+
+from PAMI.localPeriodicPattern.basic import abstract as _ab
+from typing import List, Dict, Tuple, Set, Union, Any, Generator
+from deprecated import deprecated
+
+
+[docs] +class Node: + """ + A class used to represent the node of localPeriodicPatternTree + + :Attributes: + + item : int + storing item of a node + parent : node + To maintain the parent of every node + child : list + To maintain the children of node + nodeLink : node + To maintain the next node of node + tidList : set + To maintain timestamps of node + + :Methods: + + getChild(itemName) + storing the children to their respective parent nodes + """ + + def __init__(self) -> None: + self.item = -1 + self.parent = None + self.child = [] + self.nodeLink = None + self.tidList = set() + +
+[docs] + def getChild(self, item: int) -> 'Node': + """ + This function is used to get a child node from the parent node + + :param item: item of the child node to look up + + :type item: int + + :return: the child node holding the item if it exists; otherwise an empty list + + :rtype: Node + """ + for child in self.child: + if child.item == item: + return child + return []
+
+ + + +
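+
+# A minimal sketch, on a hypothetical item, of how Node objects link together
+# and how getChild() behaves for present and absent items.
+def _demoNode():
+    root, child = Node(), Node()
+    child.item, child.parent = 7, root
+    root.child.append(child)
+    assert root.getChild(7) is child  # an existing child is returned
+    assert root.getChild(9) == []     # a missing child yields an empty list
+    return root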
+[docs] +class Tree: + """ + A class used to represent the frequentPatternGrowth tree structure + + :Attributes: + + root : node + Represents the root node of the tree + nodeLinks : dictionary + storing last node of each item + firstNodeLink : dictionary + storing first node of each item + + :Methods: + + addTransaction(transaction,timeStamp) + creating transaction as a branch in frequentPatternTree + fixNodeLinks(itemName, newNode) + add newNode link after last node of item + deleteNode(itemName) + delete all node of item + createPrefixTree(path,timeStampList) + create prefix tree by path + + """ + def __init__(self) -> None: + self.root = Node() + self.nodeLinks = {} + self.firstNodeLink = {} + +
+[docs] + def addTransaction(self, transaction: List[int], tid: int) -> None: + """ + Add a transaction into the tree + + :param transaction: it represents one transaction in the database + :type transaction: list + :param tid: represents the timestamp of the transaction + :type tid: int + :return: None + """ + current = self.root + for item in transaction: + child = current.getChild(item) + if not child: + newNode = Node() + newNode.item = item + newNode.parent = current + current.child.append(newNode) + current = newNode + self.fixNodeLinks(item, newNode) + else: + current = child + current.tidList.add(tid)
+ + +
+[docs] + def fixNodeLinks(self, item: int, newNode: 'Node') -> None: + """ + Add newNode after the last node of the item and record the first node of the item + + :param item: it represents the item of newNode + :type item: int + :param newNode: the node to be linked + :type newNode: Node + :return: None + """ + if item in self.nodeLinks: + lastNode = self.nodeLinks[item] + lastNode.nodeLink = newNode + self.nodeLinks[item] = newNode + if item not in self.firstNodeLink: + self.firstNodeLink[item] = newNode
+ + +
+[docs] + def deleteNode(self, item: int) -> None: + """ + delete all nodes of the item from the tree + + :param item: it represents the item whose nodes are removed + :type item: int + :return: None + """ + deleteNode = self.firstNodeLink[item] + parentNode = deleteNode.parent + parentNode.child.remove(deleteNode) + parentNode.child += deleteNode.child + parentNode.tidList |= deleteNode.tidList + for child in deleteNode.child: + child.parent = parentNode + while deleteNode.nodeLink: + deleteNode = deleteNode.nodeLink + parentNode = deleteNode.parent + parentNode.child.remove(deleteNode) + parentNode.child += deleteNode.child + parentNode.tidList |= deleteNode.tidList + for child in deleteNode.child: + child.parent = parentNode
+ + +
+[docs] + def createPrefixTree(self, path: List[int], tidList: Set[int]) -> None: + """ + create prefix tree by path + + :param path: it represents the path of items from the root to the prefix node + :type path: list + :param tidList: it represents the timestamps merged into the terminal node of the path + :type tidList: set + :return: None + """ + currentNode = self.root + for item in path: + child = currentNode.getChild(item) + if not child: + newNode = Node() + newNode.item = item + newNode.parent = currentNode + currentNode.child.append(newNode) + currentNode = newNode + self.fixNodeLinks(item, newNode) + else: + currentNode = child + currentNode.tidList |= tidList
+
+ + + +
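+
+# A minimal sketch, on hypothetical transactions, of how the tree works,
+# assuming fixNodeLinks maintains the node links as documented:
+# addTransaction() shares prefixes and stamps the timestamp only on the
+# terminal node, while deleteNode() splices an item out, re-parenting its
+# children and merging its tidList into the parent.
+def _demoTree():
+    t = Tree()
+    t.addTransaction([1, 2], 1)
+    t.addTransaction([1, 2, 3], 4)
+    one = t.root.getChild(1)
+    assert one.getChild(2).tidList == {1}
+    assert one.getChild(2).getChild(3).tidList == {4}
+    t.deleteNode(2)                   # 2 spliced out; its tidList moves up
+    assert one.tidList == {1}
+    assert one.getChild(3).tidList == {4}
+    return t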
+[docs] +class LPPGrowth(_ab._localPeriodicPatterns): + """ + :Description: + + Local Periodic Patterns, which are patterns (sets of events) that have a periodic behavior in some non predefined + time-intervals. A pattern is said to be a local periodic pattern if it appears regularly and continuously in some + time-intervals. The maxSoPer (maximal period of spillovers) measure allows detecting time-intervals of variable + lengths where a pattern is continuously periodic, while the minDur (minimal duration) measure ensures that those + time-intervals have a minimum duration. + + :Reference: + + Fournier-Viger, P., Yang, P., Kiran, R. U., Ventura, S., Luna, J. M.. (2020). Mining Local Periodic Patterns in + a Discrete Sequence. Information Sciences, Elsevier, to appear. [ppt] DOI: 10.1016/j.ins.2020.09.044 + + :param iFile: str : + Name of the Input file to mine complete set of local periodic pattern's + :param oFile: str : + Name of the output file to store complete set of local periodic patterns + :param minDur: str: + Minimal duration in seconds between consecutive periods of time-intervals where a pattern is continuously periodic. + :param maxPer: float: + Controls the maximum number of transactions in which any two items within a pattern can reappear. + :param maxSoPer: float: + Controls the maximum number of time periods between consecutive periods of time-intervals where a pattern is continuously periodic. + + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + :Attributes: + + iFile : str + Input file name or path of the input file + oFile : str + Output file name or path of the output file + maxPer : float + User defined maxPer value. + maxSoPer : float + User defined maxSoPer value. + minDur : float + User defined minDur value. + tsMin : int / date + First time stamp of input data. + tsMax : int / date + Last time stamp of input data. + startTime : float + Time when start of execution the algorithm. + endTime : float + Time when end of execution the algorithm. + finalPatterns : dict + To store local periodic patterns and its PTL. + tsList : dict + To store items and its time stamp as bit vector. + root : Tree + It is root node of transaction tree of whole input data. + PTL : dict + Storing the item and its PTL. + items : list + Storing local periodic item list. + sep: str + separator used to distinguish items from each other. The default separator is tab space. + + :Methods: + + findSeparator(line) + Find the separator of the line which split strings. + creteLPPlist() + Create the local periodic patterns list from input data. + createTSList() + Create the tsList as bit vector from input data. + generateLPP() + Generate 1 length local periodic pattens by tsList and execute depth first search. + createLPPTree() + Create LPPTree of local periodic item from input data. + patternGrowth(tree, prefix, prefixPFList) + Execute pattern growth algorithm. It is important function in this program. + calculatePTL(tsList) + Calculate PTL from input tsList as integer list. + calculatePTLbit(tsList) + Calculate PTL from input tsList as bit vector. + mine() + Mining process will start from here. + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function. + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function. 
+ getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function. + getLocalPeriodicPatterns() + return local periodic patterns and its PTL + save(oFile) + Complete set of local periodic patterns will be loaded in to an output file. + getPatternsAsDataFrame() + Complete set of local periodic patterns will be loaded in to a dataframe. + + **Executing the code on terminal:** + --------------------------------------- + + .. code-block:: console + + Format: + + (.venv) $ python3 LPPMGrowth.py <inputFile> <outputFile> <maxPer> <minSoPer> <minDur> + + Example Usage: + + (.venv) $ python3 LPPMGrowth.py sampleDB.txt patterns.txt 0.3 0.4 0.5 + + .. note: minDur will be considered as time interval between two consecutive periods + + + **Sample run of importing the code:** + ---------------------------------------- + .. code-block:: python + + from PAMI.localPeriodicPattern.basic import LPPGrowth as alg + + obj = alg.LPPGrowth(iFile, maxPer, maxSoPer, minDur) + + obj.mine() + + localPeriodicPatterns = obj.getPatterns() + + print(f'Total number of local periodic patterns: {len(localPeriodicPatterns)}') + + obj.save(oFile) + + Df = obj.getPatternsAsDataFrame() + + memUSS = obj.getMemoryUSS() + + print(f'Total memory in USS: {memUSS}') + + memRSS = obj.getMemoryRSS() + + print(f'Total memory in RSS: {memRSS}') + + runtime = obj.getRuntime() + + print(f'Total execution time in seconds: {runtime}) + + **Credits:** + -------------- + The complete program was written by So Nakamura under the supervision of Professor Rage Uday Kiran. + """ + _localPeriodicPatterns__iFile = ' ' + _localPeriodicPatterns__oFile = ' ' + _localPeriodicPatterns__maxPer = str() + _localPeriodicPatterns__maxSoPer = str() + _localPeriodicPatterns__minDur = str() + __tsMin = 0 + __tsMax = 0 + _localPeriodicPatterns__startTime = float() + _localPeriodicPatterns__endTime = float() + _localPeriodicPatterns__memoryUSS = float() + _localPeriodicPatterns__memoryRSS = float() + _localPeriodicPatterns__finalPatterns = {} + __tsList = {} + __root = Tree() + __PTL = {} + __items = [] + _localPeriodicPatterns__sep = ' ' + __Database = [] + + def __creatingItemSets(self) -> None: + """ + Storing the complete transactions of the database/input file in a database variable + """ + self.__Database = [] + if isinstance(self._localPeriodicPatterns__iFile, _ab._pd.DataFrame): + if self._localPeriodicPatterns__iFile.empty: + print("its empty..") + i = self._localPeriodicPatterns__iFile.columns.values.tolist() + if 'Transactions' in i: + self.__Database = self._localPeriodicPatterns__iFile['Transactions'].tolist() + if 'Patterns' in i: + self.__Database = self._localPeriodicPatterns__iFile['Patterns'].tolist() + + if isinstance(self._localPeriodicPatterns__iFile, str): + if _ab._validators.url(self._localPeriodicPatterns__iFile): + data = _ab._urlopen(self._localPeriodicPatterns__iFile) + for line in data: + line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._localPeriodicPatterns__sep)] + temp = [x for x in temp if x] + self.__Database.append(temp) + else: + try: + with open(self._localPeriodicPatterns__iFile, 'r', encoding='utf-8') as f: + for line in f: + line.strip() + temp = [i.rstrip() for i in line.split(self._localPeriodicPatterns__sep)] + temp = [x for x in temp if x] + self.__Database.append(temp) + except IOError: + print("File Not Found") + quit() + + def __createLPPlist(self) -> None: + """ + Create Local Periodic Pattern list from temporal data. 
+ """ + LPPList = {} + PTL = {} + start = {} + tsPre = {} + for line in self.__Database: + soPer = ' ' + self.__tsMin = int(line.pop(0)) + ts = self.__tsMin + for item in line: + if item in LPPList: + per = ts - tsPre[item] + if per <= self._localPeriodicPatterns__maxPer and start == -1: + start = tsPre[item] + soPer = self._localPeriodicPatterns__maxSoPer + if start != -1: + soPer = max(0, soPer + per - self._localPeriodicPatterns__maxPer) + if soPer > self._localPeriodicPatterns__maxSoPer: + if tsPre[item] - start[item] <= self._localPeriodicPatterns__minDur: + PTL[item].add((start[item], tsPre[item])) + LPPList[item] = PTL[item] + start[item] = -1 + else: + tsPre[item] = ts + start[item] = -1 + LPPList[item] = set() + for line in self.__Database: + ts = int(line.pop(0)) + for item in line: + if item in LPPList: + per = ts - tsPre[item] + if per <= self._localPeriodicPatterns__maxPer and start[item] == -1: + start[item] = tsPre[item] + soPer = self._localPeriodicPatterns__maxSoPer + if start[item] != -1: + soPer = max(0, soPer + per - self._localPeriodicPatterns__maxPer) + if soPer > self._localPeriodicPatterns__maxSoPer: + PTL[item].add((start[item], tsPre[item])) + LPPList[item] = PTL[item] + start[item] = -1 + tsPre[item] = ts + else: + tsPre[item] = ts + start[item] = -1 + LPPList[item] = set() + + def __createTSList(self) -> None: + """ + Create tsList as bit vector from temporal data. + """ + # for line in self.__Database: + # count = 1 + # bitVector = 0b1 << count + # bitVector = bitVector | 0b1 + # self.__tsMin = int(line.pop(0)) + # self.__tsList = {item: bitVector for item in line} + # count += 1 + # ts = ' ' + count = 1 + for line in self.__Database: + bitVector = 0b1 << count + bitVector = bitVector | 0b1 + # print(line) + ts = line[0] + for item in line[1:]: + if self.__tsList.get(item): + different = abs(bitVector.bit_length() - self.__tsList[item].bit_length()) + self.__tsList[item] = self.__tsList[item] << different + self.__tsList[item] = self.__tsList[item] | 0b1 + else: + self.__tsList[item] = bitVector + count += 1 + self.__tsMax = int(ts) + + for item in self.__tsList: + different = abs(bitVector.bit_length() - self.__tsList[item].bit_length()) + self.__tsList[item] = self.__tsList[item] << different + self._localPeriodicPatterns__maxPer = self.__convert(self._localPeriodicPatterns__maxPer) + self._localPeriodicPatterns__maxSoPer = self.__convert(self._localPeriodicPatterns__maxSoPer) + self._localPeriodicPatterns__minDur = self.__convert(self._localPeriodicPatterns__minDur) + + def __generateLPP(self) -> None: + """ + Generate local periodic items from bit vector tsList. 
+ """ + PTL = {} + for item in self.__tsList: + PTL[item] = set() + ts = list(bin(self.__tsList[item])) + ts = ts[2:] + start = -1 + currentTs = 1 + soPer = ' ' + tsPre = ' ' + for t in ts[currentTs:]: + if t == '0': + currentTs += 1 + continue + else: + tsPre = currentTs + currentTs += 1 + break + for t in ts[currentTs:]: + if t == '0': + currentTs += 1 + continue + else: + per = currentTs - tsPre + if per <= self._localPeriodicPatterns__maxPer and start == -1: + start = tsPre + soPer = self._localPeriodicPatterns__maxSoPer + if start != -1: + soPer = max(0, soPer + per - self._localPeriodicPatterns__maxPer) + if soPer > self._localPeriodicPatterns__maxSoPer: + if tsPre - start >= self._localPeriodicPatterns__minDur: + PTL[item].add((start, tsPre)) + """else: + bitVector = 0b1 << currentTs + different = abs(self.tsList[item].bit_length() - bitVector.bit_length()) + bitVector = bitVector | 0b1 + bitVector = bitVector << different + self.tsList[item] = self.tsList[item] | bitVector""" + start = -1 + tsPre = currentTs + currentTs += 1 + if start != -1: + soPer = max(0, soPer + self.__tsMax - tsPre - self._localPeriodicPatterns__maxPer) + if soPer > self._localPeriodicPatterns__maxSoPer and tsPre - start >= self._localPeriodicPatterns__minDur: + PTL[item].add((start, tsPre)) + """else: + bitVector = 0b1 << currentTs+1 + different = abs(self.tsList[item].bit_length() - bitVector.bit_length()) + bitVector = bitVector | 0b1 + bitVector = bitVector << different + self.tsList[item] = self.tsList[item] | bitVector""" + if soPer <= self._localPeriodicPatterns__maxSoPer and self.__tsMax - start >= self._localPeriodicPatterns__minDur: + PTL[item].add((start, self.__tsMax)) + """else: + bitVector = 0b1 << currentTs+1 + different = abs(self.tsList[item].bit_length() - bitVector.bit_length()) + bitVector = bitVector | 0b1 + bitVector = bitVector << different + self.tsList[item] = self.tsList[item] | bitVector""" + self.__PTL = {k: v for k, v in PTL.items() if len(v) > 0} + self.__items = list(self.__PTL.keys()) + + def __createLPPTree(self) -> None: + """ + Create transaction tree of local periodic item from input data. + """ + for line in self.__Database: + ts = int(line[0]) + tempTransaction = [item for item in line[1:] if item in self.__items] + transaction = sorted(tempTransaction, key=lambda x: len(self.__PTL[x]), reverse=True) + self.__root.addTransaction(transaction, ts) + # for line in self.__Database: + # tid = int(transaction[0]) + # tempTransaction = [item for item in transaction[1:] if item in self.__items] + # transaction = sorted(tempTransaction, key=lambda x: len(self.__PTL[x]), reverse=True) + # self.__root.addTransaction(transaction, tid) + + def __patternGrowth(self, tree: 'Tree', prefix: List[int], prefixPFList: Dict[Any, Any]) -> None: + """ + Create prefix tree and prefixPFList. Store finalPatterns and its PTL. + + :param tree: The root node of prefix tree. + :type tree: Node or Tree + :param prefix: Prefix item list. + :type prefix: list + :param prefixPFList: tsList of prefix patterns. 
+ :type prefixPFList: dict or list + :return: None + """ + items = list(prefixPFList) + if not prefix: + items = reversed(items) + for item in items: + prefixCopy = prefix.copy() + prefixCopy.append(item) + PFList = {} + prefixTree = Tree() + prefixNode = tree.firstNodeLink[item] + tidList = prefixNode.tidList + path = [] + currentNode = prefixNode.parent + while currentNode.item != -1: + path.insert(0, currentNode.item) + currentNodeItem = currentNode.item + if currentNodeItem in PFList: + PFList[currentNodeItem] |= tidList + else: + PFList[currentNodeItem] = tidList + currentNode = currentNode.parent + prefixTree.createPrefixTree(path, tidList) + while prefixNode.nodeLink: + prefixNode = prefixNode.nodeLink + tidList = prefixNode.tidList + path = [] + currentNode = prefixNode.parent + while currentNode.item != -1: + path.insert(0, currentNode.item) + currentNodeItem = currentNode.item + if currentNodeItem in PFList: + PFList[currentNodeItem] = PFList[currentNodeItem] | tidList + else: + PFList[currentNodeItem] = tidList + currentNode = currentNode.parent + prefixTree.createPrefixTree(path, tidList) + if len(prefixCopy) == 1: + self._localPeriodicPatterns__finalPatterns[prefixCopy[0]] = self.__calculatePTLbit(self.__tsList[item]) + else: + self._localPeriodicPatterns__finalPatterns[tuple(prefixCopy)] = self.__calculatePTL(prefixPFList[item]) + candidateItems = list(PFList) + for i in candidateItems: + PTL = self.__calculatePTL(PFList[i]) + if len(PTL) == 0: + prefixTree.deleteNode(i) + del PFList[i] + if PFList: + self.__patternGrowth(prefixTree, prefixCopy, PFList) + + def __calculatePTL(self, tsList: List[int]) -> set: + """ + Calculate PTL from input tsList as integer list + + :param tsList: It is tsList which store time stamp as integer. + :type tsList: list + :return: PTL + :rtype: set + """ + start = -1 + PTL = set() + tsList = sorted(tsList) + tsPre = tsList[0] + soPer = ' ' + for ts in tsList[1:]: + per = ts - tsPre + if per <= self._localPeriodicPatterns__maxPer and start == -1: + start = tsPre + soPer = self._localPeriodicPatterns__maxSoPer + if start != -1: + soPer = max(0, soPer + per - self._localPeriodicPatterns__maxPer) + if soPer > self._localPeriodicPatterns__maxSoPer: + if tsPre - start >= self._localPeriodicPatterns__minDur: + PTL.add((start, tsPre)) + start = -1 + tsPre = ts + if start != -1: + soPer = max(0, soPer + self.__tsMax - tsPre - self._localPeriodicPatterns__maxPer) + if soPer > self._localPeriodicPatterns__maxSoPer and tsPre - start >= self._localPeriodicPatterns__minDur: + PTL.add((start, tsPre)) + if soPer <= self._localPeriodicPatterns__maxSoPer and self.__tsMax - start >= self._localPeriodicPatterns__minDur: + PTL.add((start, self.__tsMax)) + return PTL + + def __calculatePTLbit(self, tsList: List[int]) -> set: + """ + Calculate PTL from input tsList as bit vector. + + :param tsList: It is tsList which store time stamp as bit vector. 
+ :type tsList: int + :return: PTL + :rtype: set + """ + tsList = list(bin(tsList)) + tsList = tsList[2:] + start = -1 + currentTs = 1 + PTL = set() + tsPre = ' ' + soPer = ' ' + for ts in tsList[currentTs:]: + if ts == '0': + currentTs += 1 + continue + else: + tsPre = currentTs + currentTs += 1 + break + for ts in tsList[currentTs:]: + if ts == '0': + currentTs += 1 + continue + else: + per = currentTs - tsPre + if per <= self._localPeriodicPatterns__maxPer and start == -1: + start = tsPre + soPer = self._localPeriodicPatterns__maxSoPer + if start != -1: + soPer = max(0, soPer + per - self._localPeriodicPatterns__maxPer) + if soPer > self._localPeriodicPatterns__maxSoPer: + if tsPre - start >= self._localPeriodicPatterns__minDur: + PTL.add((start, tsPre)) + start = -1 + tsPre = currentTs + currentTs += 1 + if start != -1: + soPer = max(0, soPer + self.__tsMax - tsPre - self._localPeriodicPatterns__maxPer) + if soPer > self._localPeriodicPatterns__maxSoPer and tsPre - start >= self._localPeriodicPatterns__minDur: + PTL.add((start, tsPre)) + if soPer <= self._localPeriodicPatterns__maxSoPer and self.__tsMax - start >= self._localPeriodicPatterns__minDur: + PTL.add((start, self.__tsMax)) + return PTL + + def __convert(self, value: Any) -> float: + """ + to convert the type of the user specified threshold value (maxPer, maxSoPer, or minDur) + + :param value: user specified threshold value + :type value: int or float or str + :return: converted type + :rtype: float + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self.__Database) * value) + if type(value) is str: + if '.' in value: + value = float(value) + value = (len(self.__Database) * value) + else: + value = int(value) + return value +
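+
+    # A worked example of __convert above, assuming a database of 100
+    # transactions: ints pass through as absolute counts, while floats and
+    # strings containing '.' are read as fractions of the database size.
+    #
+    #     __convert(40)     -> 40
+    #     __convert(0.25)   -> 25.0
+    #     __convert('0.25') -> 25.0
+    #     __convert('40')   -> 40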
+[docs] + @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.") + def startMine(self) -> None: + """ + Mining process start from here. + """ + self.mine()
+ + +
+[docs] + def mine(self) -> None: + """ + Mining process start from here. + """ + self._localPeriodicPatterns__startTime = _ab._time.time() + self._localPeriodicPatterns__finalPatterns = {} + self.__creatingItemSets() + self.__createTSList() + self.__generateLPP() + self.__createLPPTree() + self.__patternGrowth(self.__root, [], self.__items) + self._localPeriodicPatterns__endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self._localPeriodicPatterns__memoryUSS = process.memory_full_info().uss + self._localPeriodicPatterns__memoryRSS = process.memory_info().rss
+ + +
+[docs] + def getMemoryUSS(self) -> float: + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._localPeriodicPatterns__memoryUSS
+ + +
+[docs] + def getMemoryRSS(self) -> float: + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._localPeriodicPatterns__memoryRSS
+ + +
+[docs] + def getRuntime(self) -> float: + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._localPeriodicPatterns__endTime - self._localPeriodicPatterns__startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self) -> '_ab._pd.DataFrame': + """ + Storing the final local periodic patterns in a dataframe + + :return: returning local periodic patterns in a dataframe + :rtype: pd.DataFrame + """ + + data = [] + for a, b in self._localPeriodicPatterns__finalPatterns.items(): + pat = a if isinstance(a, str) else ' '.join(a) + data.append([pat, b]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'PTL']) + return dataFrame
+ + +
+[docs] + def save(self, outFile: str) -> None: + """ + Complete set of local periodic patterns will be written to an output file + + :param outFile: name of the output file + :type outFile: csv file + :return: None + """ + self._localPeriodicPatterns__oFile = outFile + with open(self._localPeriodicPatterns__oFile, 'w+') as writer: + for x, y in self._localPeriodicPatterns__finalPatterns.items(): + pat = (x + '\t') if isinstance(x, str) else ''.join(str(i) + '\t' for i in x) + pat = pat + ":" + for i in y: + pat = pat + str(i) + '\t' + patternsAndPTL = pat.strip() + writer.write("%s \n" % patternsAndPTL)
+ + +
+[docs] + def getPatterns(self) -> Dict: + """ + Function to send the set of local periodic patterns after completion of the mining process + + :return: returning frequent patterns + :rtype: dict + """ + return self._localPeriodicPatterns__finalPatterns
+ + +
+[docs] + def printResults(self) -> None: + """ + This function is used to print the results + """ + print("Total number of Local Periodic Patterns:", len(self.getPatterns())) + print("Total Memory in USS:", self.getMemoryUSS()) + print("Total Memory in RSS:", self.getMemoryRSS()) + print("Total ExecutionTime in seconds:", self.getRuntime())
+
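+
+# A minimal sketch on hypothetical data: besides a file path or URL, the
+# mining can be driven by a pandas DataFrame whose 'Transactions' column
+# holds lists that begin with the timestamp, as handled in __creatingItemSets.
+def _demoDataFrameInput():
+    df = _ab._pd.DataFrame({'Transactions': [[1, 'a', 'b'], [2, 'a'],
+                                             [3, 'a', 'b'], [4, 'b']]})
+    obj = LPPGrowth(df, 1, 1, 1)  # maxPer, maxSoPer, minDur as absolute counts
+    obj.mine()
+    return obj.getPatterns()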
+ + + +if __name__ == '__main__': + _ap = str() + if len(_ab._sys.argv) == 5 or len(_ab._sys.argv) == 6: + if len(_ab._sys.argv) == 6: + _ap = LPPGrowth(_ab._sys.argv[1], _ab._sys.argv[3], float(_ab._sys.argv[4]), _ab._sys.argv[5]) + if len(_ab._sys.argv) == 5: + _ap = LPPGrowth(_ab._sys.argv[1], _ab._sys.argv[3], float(_ab._sys.argv[4])) + _ap.mine() + print("Total number of Local Periodic Patterns:", len(_ap.getPatterns())) + _ap.save(_ab._sys.argv[2]) + print("Total Memory in USS:", _ap.getMemoryUSS()) + print("Total Memory in RSS:", _ap.getMemoryRSS()) + print("Total ExecutionTime in seconds:", _ap.getRuntime()) + else: + print("Error! The number of input parameters does not match the total number of parameters provided") +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/localPeriodicPattern/basic/LPPMBreadth.html b/sphinx/_build/html/_modules/PAMI/localPeriodicPattern/basic/LPPMBreadth.html new file mode 100644 index 000000000..9f92f95b2 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/localPeriodicPattern/basic/LPPMBreadth.html @@ -0,0 +1,733 @@ + + + + + + PAMI.localPeriodicPattern.basic.LPPMBreadth — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.localPeriodicPattern.basic.LPPMBreadth

+# Local Periodic Patterns, which are patterns (sets of events) that have a periodic behavior in some non-predefined
+# time-intervals. A pattern is said to be a local periodic pattern if it appears regularly and continuously in some
+# time-intervals. The maxSoPer (maximal period of spillovers) measure allows detecting time-intervals of variable
+# lengths where a pattern is continuously periodic, while the minDur (minimal duration) measure ensures that those
+# time-intervals have a minimum duration.
+#
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#
+#             from PAMI.localPeriodicPattern.basic import LPPMBreadth as alg
+#
+#             obj = alg.LPPMBreadth(iFile, maxPer, maxSoPer, minDur)
+#
+#             obj.mine()
+#
+#             localPeriodicPatterns = obj.getPatterns()
+#
+#             print(f'Total number of local periodic patterns: {len(localPeriodicPatterns)}')
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print(f'Total memory in USS: {memUSS}')
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print(f'Total memory in RSS: {memRSS}')
+#
+#             runtime = obj.getRuntime()
+#
+#             print(f'Total execution time in seconds: {runtime}')
+#
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+from PAMI.localPeriodicPattern.basic import abstract as _ab
+from typing import List, Dict, Tuple, Set, Union, Any, Generator
+import pandas as pd
+from deprecated import deprecated
+
+
+
+[docs] +class LPPMBreadth(_ab._localPeriodicPatterns): + + """ + :Description: + + Local Periodic Patterns, which are patterns (sets of events) that have a periodic behavior in some non predefined + time-intervals. A pattern is said to be a local periodic pattern if it appears regularly and continuously in some + time-intervals. The maxSoPer (maximal period of spillovers) measure allows detecting time-intervals of variable + lengths where a pattern is continuously periodic, while the minDur (minimal duration) measure ensures that those + time-intervals have a minimum duration. + + :Reference: + + Fournier-Viger, P., Yang, P., Kiran, R. U., Ventura, S., Luna, J. M.. (2020). Mining Local Periodic Patterns in + a Discrete Sequence. Information Sciences, Elsevier, to appear. [ppt] DOI: 10.1016/j.ins.2020.09.044 + + :param iFile: str : + Name of the Input file to mine complete set of local periodic pattern's + :param oFile: str : + Name of the output file to store complete set of local periodic patterns + :param minDur: str: + Minimal duration in seconds between consecutive periods of time-intervals where a pattern is continuously periodic. + :param maxPer: float: + Controls the maximum number of transactions in which any two items within a pattern can reappear. + :param maxSoPer: float: + Controls the maximum number of time periods between consecutive periods of time-intervals where a pattern is continuously periodic. + + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + + :Attributes: + + iFile : str + Input file name or path of the input file + oFile : str + Output file name or path of the output file + maxPer : float + User defined maxPer value. + maxSoPer : float + User defined maxSoPer value. + minDur : float + User defined minDur value. + tsMin : int / date + First time stamp of input data. + tsMax : int / date + Last time stamp of input data. + startTime : float + Time when start of execution the algorithm. + endTime : float + Time when end of execution the algorithm. + finalPatterns : dict + To store local periodic patterns and its PTL. + tsList : dict + To store items and its time stamp as bit vector. + sep: str + separator used to distinguish items from each other. The default separator is tab space. + + :Methods: + + createTSList() + Create the tsList as bit vector from input data. + generateLPP() + Generate 1 length local periodic pattens by tsList and execute depth first search. + calculatePTL(tsList) + Calculate PTL from input tsList as bit vector + LPPMBreathSearch(extensionOfP) + Mining local periodic patterns using breadth first search. + mine() + Mining process will start from here. + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function. + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function. + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function. + getLocalPeriodicPatterns() + return local periodic patterns and its PTL + save(oFile) + Complete set of local periodic patterns will be loaded in to an output file. + getPatternsAsDataFrame() + Complete set of local periodic patterns will be loaded in to a dataframe. + + **Executing the code on terminal:** + -------------------------------------- + + .. 
code-block:: console + + Format: + + (.venv) $ python3 LPPBreadth.py <inputFile> <outputFile> <maxPer> <minSoPer> <minDur> + + Example Usage: + + (.venv) $ python3 LPPMBreadth.py sampleDB.txt patterns.txt 0.3 0.4 0.5 + + .. note: minDur will be considered as time interval between two consecutive periods + + **Sample run of importing the code:** + ------------------------------------- + .. code-block:: python + + from PAMI.localPeriodicPattern.basic import LPPMBreadth as alg + + obj = alg.LPPMBreadth(iFile, maxPer, maxSoPer, minDur) + + obj.mine() + + localPeriodicPatterns = obj.getPatterns() + + print(f'Total number of local periodic patterns: {len(localPeriodicPatterns)}') + + obj.save(oFile) + + Df = obj.getPatternsAsDataFrame() + + memUSS = obj.getMemoryUSS() + + print(f'Total memory in USS: {memUSS}') + + memRSS = obj.getMemoryRSS() + + print(f'Total memory in RSS: {memRSS}') + + runtime = obj.getRuntime() + + print(f'Total execution time in seconds: {runtime}) + + **Credits:** + --------------- + The complete program was written by So Nakamura under the supervision of Professor Rage Uday Kiran. + """ + + _localPeriodicPatterns__iFile = ' ' + _localPeriodicPatterns__oFile = ' ' + _localPeriodicPatterns__maxPer = str() + _localPeriodicPatterns__maxSoPer = str() + _localPeriodicPatterns__minDur = str() + __tsMin = 0 + __tsMax = 0 + _localPeriodicPatterns__startTime = float() + _localPeriodicPatterns__endTime = float() + _localPeriodicPatterns__memoryUSS = float() + _localPeriodicPatterns__memoryRSS = float() + _localPeriodicPatterns__finalPatterns = {} + __tsList = {} + _localPeriodicPatterns__sep = ' ' + __Database = [] + + def __creatingItemSets(self) -> None: + """ + Storing the complete transactions of the database/input file in a database variable + """ + self.__Database = [] + if isinstance(self._localPeriodicPatterns__iFile, _ab._pd.DataFrame): + if self._localPeriodicPatterns__iFile.empty: + print("its empty..") + i = self._localPeriodicPatterns__iFile.columns.values.tolist() + if 'Transactions' in i: + self.__Database = self._localPeriodicPatterns__iFile['Transactions'].tolist() + if 'Patterns' in i: + self.__Database = self._localPeriodicPatterns__iFile['Patterns'].tolist() + + if isinstance(self._localPeriodicPatterns__iFile, str): + if _ab._validators.url(self._localPeriodicPatterns__iFile): + data = _ab._urlopen(self._localPeriodicPatterns__iFile) + for line in data: + line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._localPeriodicPatterns__sep)] + temp = [x for x in temp if x] + self.__Database.append(temp) + else: + try: + with open(self._localPeriodicPatterns__iFile, 'r', encoding='utf-8') as f: + for line in f: + line.strip() + temp = [i.rstrip() for i in line.split(self._localPeriodicPatterns__sep)] + temp = [x for x in temp if x] + self.__Database.append(temp) + except IOError: + print("File Not Found") + quit() + + def __createTSList(self) -> None: + """ + Create tsList as bit vector from temporal data. 
+ """ + # for line in self.Database: + # count = 1 + # bitVector = 0b1 << count + # bitVector = bitVector | 0b1 + # self.tsMin = int(line.pop(0)) + # self.tsList = {item: bitVector for item in line} + # count += 1 + # ts = ' ' + count = 1 + for line in self.__Database: + bitVector = 0b1 << count + bitVector = bitVector | 0b1 + ts = line[0] + for item in line[1:]: + if self.__tsList.get(item): + different = abs(bitVector.bit_length() - self.__tsList[item].bit_length()) + self.__tsList[item] = self.__tsList[item] << different + self.__tsList[item] = self.__tsList[item] | 0b1 + else: + self.__tsList[item] = bitVector + count += 1 + self.__tsMax = int(ts) + + for item in self.__tsList: + different = abs(bitVector.bit_length() - self.__tsList[item].bit_length()) + self.__tsList[item] = self.__tsList[item] << different + self._localPeriodicPatterns__maxPer = self.__convert(self._localPeriodicPatterns__maxPer) + self._localPeriodicPatterns__maxSoPer = self.__convert(self._localPeriodicPatterns__maxSoPer) + self._localPeriodicPatterns__minDur = self.__convert(self._localPeriodicPatterns__minDur) + + def __generateLPP(self) -> None: + """ + Generate local periodic items from bit vector tsList. + When finish generating local periodic items, execute mining depth first search. + """ + I = set() + PTL = {} + for item in self.__tsList: + PTL[item] = set() + ts = list(bin(self.__tsList[item])) + ts = ts[2:] + start = -1 + currentTs = 1 + tsPre = ' ' + soPer = ' ' + for t in ts[currentTs:]: + if t == '0': + currentTs += 1 + continue + else: + tsPre = currentTs + currentTs += 1 + break + for t in ts[currentTs:]: + if t == '0': + currentTs += 1 + continue + else: + per = currentTs - tsPre + if per <= self._localPeriodicPatterns__maxPer and start == -1: + start = tsPre + soPer = self._localPeriodicPatterns__maxSoPer + if start != -1: + soPer = max(0, soPer + per - self._localPeriodicPatterns__maxPer) + if soPer > self._localPeriodicPatterns__maxSoPer: + if tsPre - start >= self._localPeriodicPatterns__minDur: + PTL[item].add((start, tsPre)) + """else: + bitVector = 0b1 << currentTs + different = abs(self.tsList[item].bit_length() - bitVector.bit_length()) + bitVector = bitVector | 0b1 + bitVector = bitVector << different + self.tsList[item] = self.tsList[item] | bitVector""" + start = -1 + tsPre = currentTs + currentTs += 1 + if start != -1: + soPer = max(0, soPer + self.__tsMax - tsPre - self._localPeriodicPatterns__maxPer) + if soPer > self._localPeriodicPatterns__maxSoPer and tsPre - start >= self._localPeriodicPatterns__minDur: + PTL[item].add((start, tsPre)) + """else: + bitVector = 0b1 << currentTs+1 + different = abs(self.tsList[item].bit_length() - bitVector.bit_length()) + bitVector = bitVector | 0b1 + bitVector = bitVector << different + self.tsList[item] = self.tsList[item] | bitVector""" + if soPer <= self._localPeriodicPatterns__maxSoPer and self.__tsMax - start >= self._localPeriodicPatterns__minDur: + PTL[item].add((start, self.__tsMax)) + """else: + bitVector = 0b1 << currentTs+1 + different = abs(self.tsList[item].bit_length() - bitVector.bit_length()) + bitVector = bitVector | 0b1 + bitVector = bitVector << different + self.tsList[item] = self.tsList[item] | bitVector""" + if len(PTL[item]) > 0: + I |= {item} + self._localPeriodicPatterns__finalPatterns[item] = PTL[item] + I = sorted(list(I)) + map = {-1 : I} + I = set(I) + while len(map) > 0: + map = self.__LPPMBreadthSearch(map) + + def __calculatePTL(self, tsList: int) -> Set[Tuple[int, int]]: + """ + calculate PTL from tsList as bit 
vector. + + :param tsList: it is one item's tsList which is used bit vector. + :type tsList: int + :return: it is PTL of input item. + :rtype: set + """ + tsList = list(bin(tsList)) + tsList = tsList[2:] + start = -1 + currentTs = 1 + PTL = set() + tsPre = ' ' + soPer = ' ' + for ts in tsList[currentTs:]: + if ts == '0': + currentTs += 1 + continue + else: + tsPre = currentTs + currentTs += 1 + break + for ts in tsList[currentTs:]: + if ts == '0': + currentTs += 1 + continue + else: + per = currentTs - tsPre + if per <= self._localPeriodicPatterns__maxPer and start == -1: + start = tsPre + soPer = self._localPeriodicPatterns__maxSoPer + if start != -1: + soPer = max(0, soPer + per - self._localPeriodicPatterns__maxPer) + if soPer > self._localPeriodicPatterns__maxSoPer: + if tsPre - start >= self._localPeriodicPatterns__minDur: + PTL.add((start, tsPre)) + start = -1 + tsPre = currentTs + currentTs += 1 + if start != -1: + soPer = max(0, soPer + self.__tsMax - tsPre - self._localPeriodicPatterns__maxPer) + if soPer > self._localPeriodicPatterns__maxSoPer and tsPre - start >= self._localPeriodicPatterns__minDur: + PTL.add((start, tsPre)) + if soPer <= self._localPeriodicPatterns__maxSoPer and self.__tsMax - start >= self._localPeriodicPatterns__minDur: + PTL.add((start, tsPre)) + return PTL + + def __LPPMBreadthSearch(self, wMap: Dict[Union[int, str], List[Union[int, str]]]) -> Dict[Union[int, str], List[Union[int, str]]]: + """ + Mining n-length local periodic pattens from n-1-length patterns by depth first search. + + :param wMap: it is w length patterns and its conditional items + :type wMap: dict + :return w1map: it is w+1 length patterns and its conditional items + :rtype w1map: dict + """ + w1map = {} + + for p in wMap: + tsp = ' ' + listP = ' ' + if p != -1: + listP = p + if type(p) == str: + listP = [p] + tsp = self.__tsList[listP[0]] + for item in listP[1:]: + tsp = tsp & self.__tsList[item] + for x in range(len(wMap[p])-1): + for y in range(x+1, len(wMap[p])): + if p == -1: + tspxy = self.__tsList[wMap[p][x]] & self.__tsList[wMap[p][y]] + else: + tspxy = tsp & self.__tsList[wMap[p][x]] & self.__tsList[wMap[p][y]] + PTL = self.__calculatePTL(tspxy) + if len(PTL) > 0: + if p == -1: + if not w1map.get(wMap[p][x]): + w1map[wMap[p][x]] = [] + pattern = (wMap[p][x], wMap[p][y]) + self._localPeriodicPatterns__finalPatterns[pattern] = PTL + w1map[wMap[p][x]].append(wMap[p][y]) + else: + pattern = [item for item in listP] + pattern.append(wMap[p][x]) + pattern1 = pattern.copy() + pattern.append(wMap[p][y]) + self._localPeriodicPatterns__finalPatterns[tuple(pattern)] = PTL + if not w1map.get(tuple(pattern1)): + w1map[tuple(pattern1)] = [] + w1map[tuple(pattern1)].append(wMap[p][y]) + return w1map + + def __convert(self, value: Union[int, float, str]) -> Union[int, float]: + """ + to convert the type of user specified minSup value + + :param value: user specified minSup value + :type value: int or float or str + :return: converted type + :rtype: int or float + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self.__Database) * value) + if type(value) is str: + if '.' in value: + value = float(value) + value = (len(self.__Database) * value) + else: + value = int(value) + return value + +
+[docs] + @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.") + def startMine(self) -> None: + """ + Mining process start from here. + """ + self.mine()
+ + +
+[docs] + def mine(self) -> None: + """ + Mining process start from here. + """ + self._localPeriodicPatterns__startTime = _ab._time.time() + self.__creatingItemSets() + self._localPeriodicPatterns__finalPatterns = {} + self.__createTSList() + self.__generateLPP() + self._localPeriodicPatterns__endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self._localPeriodicPatterns__memoryUSS = process.memory_full_info().uss + self._localPeriodicPatterns__memoryRSS = process.memory_info().rss
+ + +
+[docs] + def getMemoryUSS(self) -> float: + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._localPeriodicPatterns__memoryUSS
+ + +
+[docs] + def getMemoryRSS(self) -> float: + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._localPeriodicPatterns__memoryRSS
+ + +
+[docs] + def getRuntime(self) -> float: + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._localPeriodicPatterns__endTime - self._localPeriodicPatterns__startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self) -> pd.DataFrame: + """ + Storing the final local periodic patterns in a dataframe + + :return: returning local periodic patterns in a dataframe + :rtype: pd.DataFrame + """ + + data = [] + for a, b in self._localPeriodicPatterns__finalPatterns.items(): + pat = a if isinstance(a, str) else ' '.join(a) + data.append([pat, b]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'PTL']) + return dataFrame
+ + +
+[docs] + def save(self, outFile: str) -> None: + """ + Complete set of local periodic patterns will be written to an output file + + :param outFile: name of the output file + :type outFile: csv file + :return: None + """ + self._localPeriodicPatterns__oFile = outFile + with open(self._localPeriodicPatterns__oFile, 'w+') as writer: + for x, y in self._localPeriodicPatterns__finalPatterns.items(): + pat = (x + '\t') if isinstance(x, str) else ''.join(str(i) + '\t' for i in x) + pat = pat + ":" + for i in y: + pat = pat + str(i) + '\t' + patternsAndPTL = pat.strip() + writer.write("%s \n" % patternsAndPTL)
+ + +
+[docs] + def getPatterns(self) -> Dict[Union[Tuple[str, ...], str], Set[Tuple[int, int]]]: + """ + Function to send the set of local periodic patterns after completion of the mining process + + :return: returning frequent patterns + :rtype: dict + """ + return self._localPeriodicPatterns__finalPatterns
+ + +
+[docs] + def printResults(self) -> None: + """ + This function is used to print the results + """ + print("Total number of Local Periodic Patterns:", len(self.getPatterns())) + print("Total Memory in USS:", self.getMemoryUSS()) + print("Total Memory in RSS:", self.getMemoryRSS()) + print("Total ExecutionTime in seconds:", self.getRuntime())
+
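+
+# A minimal sketch of the bit-vector encoding behind tsList: one bit per
+# transaction, shifted left as time advances, with the lowest bit set when
+# the item occurs, so bin(vector) reads as the item's presence over time.
+def _demoBitVector(occurrences=(1, 0, 1, 1)):
+    vector = 0b1  # the item occurs in the first transaction
+    for present in occurrences[1:]:
+        vector <<= 1
+        if present:
+            vector |= 0b1
+    return bin(vector)  # '0b1011' for the default occurrences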
+ + + +if __name__ == '__main__': + _ap = str() + if len(_ab._sys.argv) == 5 or len(_ab._sys.argv) == 6: + if len(_ab._sys.argv) == 6: + _ap = LPPMBreadth(_ab._sys.argv[1], _ab._sys.argv[3], float(_ab._sys.argv[4]), _ab._sys.argv[5]) + if len(_ab._sys.argv) == 5: + _ap = LPPMBreadth(_ab._sys.argv[1], _ab._sys.argv[3], float(_ab._sys.argv[4])) + _ap.mine() + print("Total number of Local Periodic Patterns:", len(_ap.getPatterns())) + _ap.save(_ab._sys.argv[2]) + print("Total Memory in USS:", _ap.getMemoryUSS()) + print("Total Memory in RSS:", _ap.getMemoryRSS()) + print("Total ExecutionTime in seconds:", _ap.getRuntime()) + else: + print("Error! The number of input parameters does not match the total number of parameters provided") +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/localPeriodicPattern/basic/LPPMDepth.html b/sphinx/_build/html/_modules/PAMI/localPeriodicPattern/basic/LPPMDepth.html new file mode 100644 index 000000000..e7c9c8199 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/localPeriodicPattern/basic/LPPMDepth.html @@ -0,0 +1,704 @@ + + + + + + PAMI.localPeriodicPattern.basic.LPPMDepth — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.localPeriodicPattern.basic.LPPMDepth

+# Local Periodic Patterns, which are patterns (sets of events) that have a periodic behavior in some non predefined
+# time-intervals. A pattern is said to be a local periodic pattern if it appears regularly and continuously in some
+# time-intervals. The maxSoPer (maximal period of spillovers) measure allows detecting time-intervals of variable
+# lengths where a pattern is continuously periodic, while the minDur (minimal duration) measure ensures that those
+# time-intervals have a minimum duration.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#
+#             from PAMI.localPeriodicPattern.basic import LPPMDepth as alg
+#
+#             obj = alg.LPPMDepth(iFile, maxPer, maxSoPer, minDur)
+#
+#             obj.mine()
+#
+#             localPeriodicPatterns = obj.getPatterns()
+#
+#             print(f'Total number of local periodic patterns: {len(localPeriodicPatterns)}')
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print(f'Total memory in USS: {memUSS}')
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print(f'Total memory in RSS: {memRSS}')
+#
+#             runtime = obj.getRuntime()
+#
+#             print(f'Total execution time in seconds: {runtime}')
+#
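+# A rough illustration of the thresholds (a toy sketch, not part of the
+# library API): maxPer bounds the gap between consecutive occurrences inside
+# a periodic time-interval, minDur discards intervals that are too short, and
+# maxSoPer additionally tolerates a bounded amount of accumulated "spillover"
+# beyond maxPer (ignored in this simplified sketch).
+#
+#             occurrences = [1, 2, 3, 4, 10, 11, 12, 13, 14]  # toy timestamps
+#             maxPer, minDur = 2, 2
+#             intervals, start = [], occurrences[0]
+#             for prev, cur in zip(occurrences, occurrences[1:]):
+#                 if cur - prev > maxPer:
+#                     if prev - start >= minDur:
+#                         intervals.append((start, prev))
+#                     start = cur
+#             if occurrences[-1] - start >= minDur:
+#                 intervals.append((start, occurrences[-1]))
+#             # intervals == [(1, 4), (10, 14)]
+#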
+
+
+
+
+__copyright__ = """
+Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+from PAMI.localPeriodicPattern.basic import abstract as _ab
+from typing import List, Dict, Tuple, Set, Union, Any, Generator
+import pandas as pd
+from deprecated import deprecated
+
+
+
+[docs] +class LPPMDepth(_ab._localPeriodicPatterns): + + """ + :Description: + + Local Periodic Patterns, which are patterns (sets of events) that have a periodic behavior in some non predefined + time-intervals. A pattern is said to be a local periodic pattern if it appears regularly and continuously in some + time-intervals. The maxSoPer (maximal period of spillovers) measure allows detecting time-intervals of variable + lengths where a pattern is continuously periodic, while the minDur (minimal duration) measure ensures that those + time-intervals have a minimum duration. + + :Reference: + + Fournier-Viger, P., Yang, P., Kiran, R. U., Ventura, S., Luna, J. M.. (2020). Mining Local Periodic Patterns in + a Discrete Sequence. Information Sciences, Elsevier, to appear. [ppt] DOI: 10.1016/j.ins.2020.09.044 + + :param iFile: str : + Name of the Input file to mine complete set of local periodic pattern's + :param oFile: str : + Name of the output file to store complete set of local periodic patterns + :param minDur: str: + Minimal duration in seconds between consecutive periods of time-intervals where a pattern is continuously periodic. + :param maxPer: float: + Controls the maximum number of transactions in which any two items within a pattern can reappear. + :param maxSoPer: float: + Controls the maximum number of time periods between consecutive periods of time-intervals where a pattern is continuously periodic. + + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + :Attributes: + + iFile : str + Input file name or path of the input file + oFile : str + Output file name or path of the output file + maxPer : float + User defined maxPer value. + maxSoPer : float + User defined maxSoPer value. + minDur : float + User defined minDur value. + tsmin : int / date + First time stamp of input data. + tsmax : int / date + Last time stamp of input data. + startTime : float + Time when start of execution the algorithm. + endTime : float + Time when end of execution the algorithm. + finalPatterns : dict + To store local periodic patterns and its PTL. + tsList : dict + To store items and its time stamp as bit vector. + sep : str + separator used to distinguish items from each other. The default separator is tab space. + + :Methods: + + createTSlist() + Create the TSlist as bit vector from input data. + generateLPP() + Generate 1 length local periodic pattens by TSlist and execute depth first search. + calculatePTL(tsList) + Calculate PTL from input tsList as bit vector + LPPMDepthSearch(extensionOfP) + Mining local periodic patterns using depth first search. + mine() + Mining process will start from here. + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function. + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function. + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function. + getLocalPeriodicPatterns() + return local periodic patterns and its PTL + save(oFile) + Complete set of local periodic patterns will be loaded in to an output file. + getPatternsAsDataFrame() + Complete set of local periodic patterns will be loaded in to a dataframe. + + **Executing the code on terminal:** + -------------------------------------- + + .. 
code-block:: console
+
+        Format:
+
+        (.venv) $ python3 LPPMDepth.py <inputFile> <outputFile> <maxPer> <maxSoPer> <minDur>
+
+        Example Usage:
+
+        (.venv) $ python3 LPPMDepth.py sampleDB.txt patterns.txt 0.3 0.4 0.5
+
+    .. note:: minDur will be considered as the time interval between two consecutive periods
+
+
+    **Sample run of importing the code:**
+    ----------------------------------------
+    .. code-block:: python
+
+        from PAMI.localPeriodicPattern.basic import LPPMDepth as alg
+
+        obj = alg.LPPMDepth(iFile, maxPer, maxSoPer, minDur)
+
+        obj.mine()
+
+        localPeriodicPatterns = obj.getPatterns()
+
+        print(f'Total number of local periodic patterns: {len(localPeriodicPatterns)}')
+
+        obj.save(oFile)
+
+        Df = obj.getPatternsAsDataFrame()
+
+        memUSS = obj.getMemoryUSS()
+
+        print(f'Total memory in USS: {memUSS}')
+
+        memRSS = obj.getMemoryRSS()
+
+        print(f'Total memory in RSS: {memRSS}')
+
+        runtime = obj.getRuntime()
+
+        print(f'Total execution time in seconds: {runtime}')
+
+    **Credits:**
+    -------------
+    The complete program was written by So Nakamura under the supervision of Professor Rage Uday Kiran.
+    """
+
+    _localPeriodicPatterns__iFile = ''
+    _localPeriodicPatterns__oFile = ''
+    _localPeriodicPatterns__maxPer = str()
+    _localPeriodicPatterns__maxSoPer = str()
+    _localPeriodicPatterns__minDur = str()
+    __tsmin = 0
+    __tsmax = 0
+    _localPeriodicPatterns__startTime = float()
+    _localPeriodicPatterns__endTime = float()
+    _localPeriodicPatterns__memoryUSS = float()
+    _localPeriodicPatterns__memoryRSS = float()
+    _localPeriodicPatterns__finalPatterns = {}
+    __tsList = {}
+    _localPeriodicPatterns__sep = ' '
+    __Database = []
+
+    def __creatingItemSets(self) -> None:
+        """
+        Storing the complete transactions of the database/input file in a database variable
+        """
+        self.__Database = []
+        if isinstance(self._localPeriodicPatterns__iFile, _ab._pd.DataFrame):
+            if self._localPeriodicPatterns__iFile.empty:
+                print("its empty..")
+            i = self._localPeriodicPatterns__iFile.columns.values.tolist()
+            if 'Transactions' in i:
+                self.__Database = self._localPeriodicPatterns__iFile['Transactions'].tolist()
+            if 'Patterns' in i:
+                self.__Database = self._localPeriodicPatterns__iFile['Patterns'].tolist()
+
+        if isinstance(self._localPeriodicPatterns__iFile, str):
+            if _ab._validators.url(self._localPeriodicPatterns__iFile):
+                data = _ab._urlopen(self._localPeriodicPatterns__iFile)
+                for line in data:
+                    # strip() returns a new string, so its result must be assigned
+                    line = line.decode("utf-8").strip()
+                    temp = [i.rstrip() for i in line.split(self._localPeriodicPatterns__sep)]
+                    temp = [x for x in temp if x]
+                    self.__Database.append(temp)
+            else:
+                try:
+                    with open(self._localPeriodicPatterns__iFile, 'r', encoding='utf-8') as f:
+                        for line in f:
+                            line = line.strip()
+                            temp = [i.rstrip() for i in line.split(self._localPeriodicPatterns__sep)]
+                            temp = [x for x in temp if x]
+                            self.__Database.append(temp)
+                except IOError:
+                    print("File Not Found")
+                    quit()
+
+    def __createTSlist(self) -> None:
+        """
+        Create tsList as bit vector from temporal data.
+ """ + count = 1 + for line in self.__Database: + bitVector = 0b1 << count + bitVector = bitVector | 0b1 + ts = line[0] + for item in line[1:]: + if self.__tsList.get(item): + different = abs(bitVector.bit_length() - self.__tsList[item].bit_length()) + self.__tsList[item] = self.__tsList[item] << different + self.__tsList[item] = self.__tsList[item] | 0b1 + else: + self.__tsList[item] = bitVector + count += 1 + self.__tsmax = int(ts) + for item in self.__tsList: + different = abs(bitVector.bit_length() - self.__tsList[item].bit_length()) + self.__tsList[item] = self.__tsList[item] << different + self._localPeriodicPatterns__maxPer = self.__convert(self._localPeriodicPatterns__maxPer) + self._localPeriodicPatterns__maxSoPer = self.__convert(self._localPeriodicPatterns__maxSoPer) + self._localPeriodicPatterns__minDur = self.__convert(self._localPeriodicPatterns__minDur) + + def __generateLPP(self) -> None: + """ + Generate local periodic items from bit vector tsList. + When finish generating local periodic items, execute mining depth first search. + """ + I = set() + PTL = {} + for item in self.__tsList: + PTL[item] = set() + ts = list(bin(self.__tsList[item])) + ts = ts[2:] + start = -1 + currentTs = 1 + tsPre = ' ' + soPer = ' ' + for t in ts[currentTs:]: + if t == '0': + currentTs += 1 + continue + else: + tsPre = currentTs + currentTs += 1 + break + for t in ts[currentTs:]: + if t == '0': + currentTs += 1 + continue + else: + per = currentTs - tsPre + if per <= self._localPeriodicPatterns__maxPer and start == -1: + start = tsPre + soPer = self._localPeriodicPatterns__maxSoPer + if start != -1: + soPer = max(0, soPer + per - self._localPeriodicPatterns__maxPer) + if soPer > self._localPeriodicPatterns__maxSoPer: + if tsPre - start >= self._localPeriodicPatterns__minDur: + PTL[item].add((start, tsPre)) + """else: + bitVector = 0b1 << currentTs + different = abs(self.tsList[item].bit_length() - bitVector.bit_length()) + bitVector = bitVector | 0b1 + bitVector = bitVector << different + self.tsList[item] = self.tsList[item] | bitVector""" + start = -1 + tsPre = currentTs + currentTs += 1 + if start != -1: + soPer = max(0, soPer + self.__tsmax - tsPre - self._localPeriodicPatterns__maxPer) + if soPer > self._localPeriodicPatterns__maxSoPer and tsPre - start >= self._localPeriodicPatterns__minDur: + PTL[item].add((start, tsPre)) + """else: + bitVector = 0b1 << currentTs+1 + different = abs(self.tsList[item].bit_length() - bitVector.bit_length()) + bitVector = bitVector | 0b1 + bitVector = bitVector << different + self.tsList[item] = self.tsList[item] | bitVector""" + if soPer <= self._localPeriodicPatterns__maxSoPer and self.__tsmax - start >= self._localPeriodicPatterns__minDur: + PTL[item].add((start, self.__tsmax)) + """else: + bitVector = 0b1 << currentTs+1 + different = abs(self.tsList[item].bit_length() - bitVector.bit_length()) + bitVector = bitVector | 0b1 + bitVector = bitVector << different + self.tsList[item] = self.tsList[item] | bitVector""" + if len(PTL[item]) > 0: + I |= {item} + self._localPeriodicPatterns__finalPatterns[item] = PTL[item] + I = sorted(list(I)) + # I = set(I) + self.__LPPMDepthSearch(I) + + def __calculatePTL(self, tsList: int) -> Set[Tuple[int, int]]: + """ + calculate PTL from tsList as bit vector. + + :param tsList: it is one item's tsList which is used bit vector. + :type tsList: int + :return: it is PTL of input item. 
+ :rtype: set + """ + tsList = list(bin(tsList)) + tsList = tsList[2:] + start = -1 + currentTs = 1 + PTL = set() + tsPre = ' ' + soPer = ' ' + for ts in tsList[currentTs:]: + if ts == '0': + currentTs += 1 + continue + else: + tsPre = currentTs + currentTs += 1 + break + for ts in tsList[currentTs:]: + if ts == '0': + currentTs += 1 + continue + else: + per = currentTs - tsPre + if per <= self._localPeriodicPatterns__maxPer and start == -1: + start = tsPre + soPer = self._localPeriodicPatterns__maxSoPer + if start != -1: + soPer = max(0, soPer + per - self._localPeriodicPatterns__maxPer) + if soPer > self._localPeriodicPatterns__maxSoPer: + if tsPre - start >= self._localPeriodicPatterns__minDur: + PTL.add((start, tsPre)) + start = -1 + tsPre = currentTs + currentTs += 1 + if start != -1: + soPer = max(0, soPer + self.__tsmax - tsPre - self._localPeriodicPatterns__maxPer) + if soPer > self._localPeriodicPatterns__maxSoPer and tsPre - start >= self._localPeriodicPatterns__minDur: + PTL.add((start, tsPre)) + if soPer <= self._localPeriodicPatterns__maxSoPer and self.__tsmax - start >= self._localPeriodicPatterns__minDur: + PTL.add((start, tsPre)) + return PTL + + def __LPPMDepthSearch(self, extensionsOfP: List[Union[Tuple[str, ...], str]]) -> None: + """ + Mining n-length local periodic pattens from n-1-length patterns by depth first search. + + :param extensionsOfP: it is n-1 length patterns list. + :type extensionsOfP: list + :return: None + """ + for x in range(len(extensionsOfP)-1): + extensionsOfPx = set() + for y in range(x+1,len(extensionsOfP)): + tspxy = self.__tsList[extensionsOfP[x]] & self.__tsList[extensionsOfP[y]] + PTL = self.__calculatePTL(tspxy) + if len(PTL) > 0: + if type(extensionsOfP[x]) == str: + pattern = (extensionsOfP[x], extensionsOfP[y]) + self._localPeriodicPatterns__finalPatterns[pattern] = PTL + self.__tsList[pattern] = tspxy + extensionsOfPx.add(pattern) + else: + px = [item for item in extensionsOfP[x]] + py = [item for item in extensionsOfP[y]] + pattern = set(px + py) + self._localPeriodicPatterns__finalPatterns[tuple(pattern)] = PTL + self.__tsList[tuple(pattern)] = tspxy + extensionsOfPx.add(tuple(pattern)) + if extensionsOfPx: + self.__LPPMDepthSearch(list(extensionsOfPx)) + + def __convert(self, value: Union[int, float, str]) -> Union[int, float]: + """ + to convert the type of user specified minSup value + + :param value: user specified minSup value + :type value: int or float or str + :return: converted type + :rtype: int or float + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self.__Database) * value) + if type(value) is str: + if '.' in value: + value = float(value) + value = (len(self.__Database) * value) + else: + value = int(value) + return value + +
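The depth-first search above hinges on the bit-vector encoding built by __createTSlist: each item's timestamp list is one integer whose set bits mark the timestamps at which the item occurs, so the joint timestamp list of a candidate extension is a single bitwise AND (the `tspxy = ... & ...` line in __LPPMDepthSearch). A minimal standalone sketch of the idea, not the class's exact bit layout:

.. code-block:: python

    # Toy bit-vector timestamp lists: bit i set <=> the item occurs at ts i.
    tsA = 0b101110   # item 'a' occurs at timestamps 1, 2, 3, 5
    tsB = 0b100110   # item 'b' occurs at timestamps 1, 2, 5

    tsAB = tsA & tsB  # joint occurrences of the candidate itemset {a, b}
    joint = [i for i in range(tsAB.bit_length()) if (tsAB >> i) & 1]
    print(joint)  # [1, 2, 5]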
+[docs] + @deprecated("It is recommended to use 'mine()' instead of 'startMine()' for mining process. Starting from January 2025, 'startMine()' will be completely terminated.") + def startMine(self) -> None: + """ + Mining process start from here. This function calls createTSlist and generateLPP. + """ + self.mine()
+ + +
+[docs]
+    def mine(self) -> None:
+        """
+        Mining process starts from here. This function calls createTSlist and generateLPP.
+        """
+        self._localPeriodicPatterns__startTime = _ab._time.time()
+        self._localPeriodicPatterns__finalPatterns = {}
+        self.__creatingItemSets()
+        # __createTSlist() converts maxPer, maxSoPer and minDur itself;
+        # converting them here as well would apply the conversion twice.
+        self.__createTSlist()
+        self.__generateLPP()
+        self._localPeriodicPatterns__endTime = _ab._time.time()
+        process = _ab._psutil.Process(_ab._os.getpid())
+        self._localPeriodicPatterns__memoryUSS = process.memory_full_info().uss
+        self._localPeriodicPatterns__memoryRSS = process.memory_info().rss
+ + +
+[docs] + def getMemoryUSS(self) -> float: + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._localPeriodicPatterns__memoryUSS
+ + +
+[docs] + def getMemoryRSS(self) -> float: + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._localPeriodicPatterns__memoryRSS
+ + +
+[docs] + def getRuntime(self) -> float: + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._localPeriodicPatterns__endTime - self._localPeriodicPatterns__startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self) -> _ab._pd.DataFrame: + """ + Storing final local periodic patterns in a dataframe + + :return: returning local periodic patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataFrame = {} + data = [] + for a, b in self._localPeriodicPatterns__finalPatterns.items(): + pat = str() + for i in a: + pat = pat + i + ' ' + data.append([pat, b]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'PTL']) + return dataFrame
+ + +
+[docs]
+    def save(self, outFile: str) -> None:
+        """
+        Complete set of local periodic patterns will be written to an output file
+
+        :param outFile: name of the output file
+        :type outFile: str
+        :return: None
+        """
+        self._localPeriodicPatterns__oFile = outFile
+        # Use a context manager so the file handle is closed after writing
+        with open(self._localPeriodicPatterns__oFile, 'w+') as writer:
+            for x, y in self._localPeriodicPatterns__finalPatterns.items():
+                pat = str()
+                for i in x:
+                    pat = pat + i + '\t'
+                pat = pat + ":"
+                for i in y:
+                    pat = pat + str(i) + '\t'
+                patternsAndPTL = pat.strip()
+                writer.write("%s \n" % patternsAndPTL)
+ + +
+[docs] + def getPatterns(self) -> Dict[Union[Tuple[str, ...], str], Set[Tuple[int, int]]]: + """ + Function to send the set of local periodic patterns after completion of the mining process + + :return: returning frequent patterns + :rtype: dict + """ + return self._localPeriodicPatterns__finalPatterns
+ + +
+[docs]
+    def printResults(self) -> None:
+        """
+        This function is used to print the results
+        """
+        print("Total number of Local Periodic Patterns:", len(self.getPatterns()))
+        print("Total Memory in USS:", self.getMemoryUSS())
+        print("Total Memory in RSS:", self.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+
+
+if __name__ == '__main__':
+    _ap = str()
+    if len(_ab._sys.argv) == 5 or len(_ab._sys.argv) == 6:
+        if len(_ab._sys.argv) == 6:
+            _ap = LPPMDepth(_ab._sys.argv[1], _ab._sys.argv[3], float(_ab._sys.argv[4]), _ab._sys.argv[5])
+        if len(_ab._sys.argv) == 5:
+            _ap = LPPMDepth(_ab._sys.argv[1], _ab._sys.argv[3], float(_ab._sys.argv[4]))
+        _ap.mine()  # calling startMine() as well would run the mining process twice
+        print("Total number of Local Periodic Patterns:", len(_ap.getPatterns()))
+        _ap.save(_ab._sys.argv[2])
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS:", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        print("Error! The number of input parameters does not match the total number of parameters provided")
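Because __creatingItemSets also accepts a pandas DataFrame with a 'Transactions' column, the miner can be driven without an input file. A hedged end-to-end sketch (the timestamps, items and threshold values are invented; the constructor is assumed to take iFile, maxPer, maxSoPer and minDur positionally, as in the docstring example):

.. code-block:: python

    import pandas as pd
    from PAMI.localPeriodicPattern.basic import LPPMDepth as alg

    # Each transaction starts with its timestamp, followed by its items,
    # mirroring the '<ts> <item> <item> ...' layout of the input files.
    df = pd.DataFrame({'Transactions': [
        ['1', 'a', 'b'],
        ['2', 'a'],
        ['3', 'a', 'b'],
        ['4', 'b'],
    ]})

    obj = alg.LPPMDepth(df, 2, 3, 2)  # toy maxPer, maxSoPer, minDur
    obj.mine()
    print(obj.getPatterns())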
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/multipleMinimumSupportBasedFrequentPattern/basic/CFPGrowth.html b/sphinx/_build/html/_modules/PAMI/multipleMinimumSupportBasedFrequentPattern/basic/CFPGrowth.html new file mode 100644 index 000000000..74c74676d --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/multipleMinimumSupportBasedFrequentPattern/basic/CFPGrowth.html @@ -0,0 +1,815 @@ + + + + + + PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowth — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
+
+
+
+
+ +

Source code for PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowth

+# CFPGrowth is a basic code of the fundamental algorithm to discover frequent patterns based on multiple minimum support in a transactional database.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#
+#             from PAMI.multipleMinimumSupportBasedFrequentPattern.basic import CFPGrowth as alg
+#
+#             obj = alg.CFPGrowth(iFile, mIS)
+#
+#             obj.mine()
+#
+#             frequentPatterns = obj.getPatterns()
+#
+#             print("Total number of Frequent Patterns:", len(frequentPatterns))
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS:", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
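Unlike single-minSup miners, CFPGrowth reads a separate MIS source: _getMISValues below accepts either a DataFrame with 'items' and 'MIS' columns or a file/URL with one '<item><sep><MIS>' pair per line, parsed as item = temp[0] and int(temp[1]). A small sketch of preparing such a file (the file name and values are invented):

.. code-block:: python

    # Write a toy MIS file: one item and its integer minimum support per
    # line, separated by the same separator used for the transactions.
    mis = {'a': 2, 'b': 3, 'c': 4}
    with open('MIS_sample.txt', 'w') as f:  # hypothetical file name
        for item, minsup in mis.items():
            f.write(f"{item}\t{minsup}\n")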
+
+
+__copyright__ = """
+ Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+
+from PAMI.multipleMinimumSupportBasedFrequentPattern.basic import abstract as _fp
+from typing import List, Dict, Tuple, Generator
+import pandas as pd
+from deprecated import deprecated
+
+_fp._sys.setrecursionlimit(20000)
+_MIS = {}
+
+class _Node:
+    """
+        A class used to represent the node of frequentPatternTree
+
+    :Attributes:
+
+        itemId: int
+            storing item of a node
+        counter: int
+            To maintain the support of node
+        parent: node
+            To maintain the parent of node
+        children: dict
+            To maintain the children of the node, keyed by itemId
+
+    :Methods:
+
+        addChild(node)
+            Updates the nodes children list and parent for the given node
+
+    """
+
+    def __init__(self, item: int, children: list) -> None:
+        self.itemId = item
+        self.counter = 1
+        self.parent = None
+        self.children = children
+
+    def addChild(self, node) -> None:
+        """
+        Retrieving the child from the tree
+
+        :param node: Children node.
+        :type node: Node
+        :return: Updates the children nodes and parent nodes
+
+        """
+        self.children[node.itemId] = node
+        node.parent = self
+
+
+class _Tree:
+    """
+    A class used to represent the frequentPatternGrowth tree structure
+
+    :Attributes:
+
+        root : Node
+            The first node of the tree set to Null.
+        summaries : dictionary
+            Stores the nodes itemId which shares same itemId
+        info : dictionary
+            frequency of items in the transactions
+
+    :Methods:
+
+        addTransaction(transaction, freq)
+            adding items of  transactions into the tree as nodes and freq is the count of nodes
+        getFinalConditionalPatterns(node)
+            getting the conditional patterns from fp-tree for a node
+        getConditionalPatterns(patterns, frequencies)
+            sort the patterns by removing the items with lower minSup
+        generatePatterns(prefix)
+            generating the patterns from fp-tree
+    """
+
+    def __init__(self) -> None:
+        self.root = _Node(None, {})
+        self.summaries = {}
+        self.info = {}
+
+    def addTransaction(self, transaction, count) -> None:
+        """
+        adding transaction into tree
+
+        :param transaction: it represents the one transaction in database
+        :type transaction: list
+        :param count: frequency of item
+        :type count: int
+        :return: None
+        """
+
+        # This method takes transaction as input and returns the tree
+        currentNode = self.root
+        for i in range(len(transaction)):
+            if transaction[i] not in currentNode.children:
+                newNode = _Node(transaction[i], {})
+                newNode.freq = count
+                currentNode.addChild(newNode)
+                if transaction[i] in self.summaries:
+                    self.summaries[transaction[i]].append(newNode)
+                else:
+                    self.summaries[transaction[i]] = [newNode]
+                currentNode = newNode
+            else:
+                currentNode = currentNode.children[transaction[i]]
+                currentNode.freq += count
+
+    def getFinalConditionalPatterns(self, alpha) -> Tuple[List[List[int]], List[int], Dict[int, int]]:
+        """
+        generates the conditional patterns for a node
+
+        :param alpha: node to generate conditional patterns
+        :return: returns conditional patterns, frequency of each item in conditional patterns
+
+        """
+        finalPatterns = []
+        finalFreq = []
+        for i in self.summaries[alpha]:
+            set1 = i.freq
+            set2 = []
+            while i.parent.itemId is not None:
+                set2.append(i.parent.itemId)
+                i = i.parent
+            if len(set2) > 0:
+                set2.reverse()
+                finalPatterns.append(set2)
+                finalFreq.append(set1)
+        finalPatterns, finalFreq, info = self.getConditionalTransactions(finalPatterns, finalFreq)
+        return finalPatterns, finalFreq, info
+
+    @staticmethod
+    def getConditionalTransactions(ConditionalPatterns, conditionalFreq) -> Tuple[List[List[int]], List[int], Dict[int, int]]:
+        """
+        To calculate the frequency of items in conditional patterns and sorting the patterns
+
+        :param ConditionalPatterns: paths of a node
+        :param conditionalFreq: frequency of each item in the path
+        :return: conditional patterns and frequency of each item in transactions
+        """
+        pat = []
+        freq = []
+        data1 = {}
+        for i in range(len(ConditionalPatterns)):
+            for j in ConditionalPatterns[i]:
+                if j in data1:
+                    data1[j] += conditionalFreq[i]
+                else:
+                    data1[j] = conditionalFreq[i]
+        #up_dict = {k: v for k, v in data1.items() if v >= _minSup}
+        up_dict = data1.copy()
+        count = 0
+        for p in ConditionalPatterns:
+            p1 = [v for v in p if v in up_dict]
+            trans = sorted(p1, key=lambda x: (up_dict.get(x), -x), reverse=True)
+            if len(trans) > 0:
+                pat.append(trans)
+                freq.append(conditionalFreq[count])
+            count += 1
+        return pat, freq, up_dict
+
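The sort key used above, `(up_dict.get(x), -x)` with reverse=True, orders a conditional transaction by descending count and breaks ties by ascending item rank; negating x is what flips the tie-break under reverse=True. A quick check of that behaviour:

.. code-block:: python

    up_dict = {3: 5, 7: 5, 2: 9}   # toy item-rank -> count
    path = [7, 2, 3]
    trans = sorted(path, key=lambda x: (up_dict.get(x), -x), reverse=True)
    print(trans)  # [2, 3, 7]: highest count first, then smaller rank first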
+    def generatePatterns(self, prefix) -> Generator[Tuple[List[int], int], None, None]:
+        """
+        To generate the frequent patterns
+
+        :param prefix: an empty list
+        :return: Frequent patterns that are extracted from fp-tree
+        """
+        global _MIS
+        for i in sorted(self.summaries, key=lambda x: (self.info.get(x), -x)):
+            pattern = prefix[:]
+            pattern.append(i)
+            sup = []
+            for j in pattern:
+                sup.append(_MIS[j])
+            if self.info[i] >= min(sup):
+                yield pattern, self.info[i]
+            patterns, freq, info = self.getFinalConditionalPatterns(i)
+            conditionalTree = _Tree()
+            conditionalTree.info = info.copy()
+            for pat in range(len(patterns)):
+                conditionalTree.addTransaction(patterns[pat], freq[pat])
+            if len(patterns) > 0:
+                for q in conditionalTree.generatePatterns(pattern):
+                    yield q
+
+
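The acceptance test inside _Tree.generatePatterns is the heart of the multiple-minimum-support model: a pattern is emitted only when its support reaches the smallest MIS among its member items (`self.info[i] >= min(sup)` above). A tiny worked sketch of that rule with invented thresholds:

.. code-block:: python

    MIS = {'bread': 4, 'milk': 3, 'caviar': 1}  # toy per-item thresholds

    def isFrequent(pattern, support):
        # A pattern qualifies if its support meets the smallest MIS of its
        # items, letting rare items like 'caviar' surface at low supports.
        return support >= min(MIS[item] for item in pattern)

    print(isFrequent(['bread', 'milk'], 3))    # True  (min MIS is 3)
    print(isFrequent(['bread'], 3))            # False (MIS of 'bread' is 4)
    print(isFrequent(['bread', 'caviar'], 2))  # True  (min MIS is 1)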
+
+[docs] +class CFPGrowth(_fp._frequentPatterns): + """ + :Description: basic is one of the fundamental algorithm to discover frequent patterns based on multiple minimum support in a transactional database. + + :Reference: Ya-Han Hu and Yen-Liang Chen. 2006. Mining association rules with multiple minimum supports: a new mining algorithm and a support tuning mechanism. + Decis. Support Syst. 42, 1 (October 2006), 1–24. https://doi.org/10.1016/j.dss.2004.09.007 + + + :param iFile: str : + Name of the Input file to mine complete set of Uncertain Minimum Support Based Frequent patterns + :param oFile: str : + Name of the output file to store complete set of Uncertain Minimum Support Based Frequent patterns + :param minSup: str: + minimum support thresholds were tuned to find the appropriate ranges in the limited memory + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + + :Attributes: + + iFile : file + Input file name or path of the input file + MIS: file or dictionary + Multiple minimum supports of all items in the database + sep : str + This variable is used to distinguish items from one another in a transaction. The default separator is tab space or \t. + However, the users can override their default separator. + oFile : file + Name of the output file or the path of the output file + startTime:float + To record the start time of the mining process + endTime:float + To record the completion time of the mining process + memoryUSS : float + To store the total amount of USS memory consumed by the program + memoryRSS : float + To store the total amount of RSS memory consumed by the program + Database : list + To store the transactions of a database in list + mapSupport : Dictionary + To maintain the information of item and their frequency + lno : int + it represents the total no of transactions + tree : class + it represents the Tree class + finalPatterns : dict + it represents to store the patterns + + :Methods: + + startMine() + Mining process will start from here + getPatterns() + Complete set of patterns will be retrieved with this function + save(oFile) + Complete set of frequent patterns will be loaded in to an output file + getPatternsAsDataFrame() + Complete set of frequent patterns will be loaded in to a dataframe + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function + creatingItemSets() + Scans the dataset or dataframes and stores in list format + frequentOneItem() + Extracts the one-frequent patterns from transactions + + **Executing the code on terminal:** + ------------------------------------- + .. code-block:: console + + + Format: + + (.venv) $ python3 CFPGrowth.py <inputFile> <outputFile> + + Examples: + + (.venv) $ python3 CFPGrowth.py sampleDB.txt patterns.txt MISFile.txt + + + .. note:: minSup will be considered in support count or frequency + + + **Sample run of the importing code:** + ---------------------------------------- + .. 
code-block:: python + + from PAMI.multipleMinimumSupportBasedFrequentPattern.basic import basic as alg + + obj = alg.basic(iFile, mIS) + + obj.startMine() + + frequentPatterns = obj.getPatterns() + + print("Total number of Frequent Patterns:", len(frequentPatterns)) + + obj.save(oFile) + + Df = obj.getPatternInDataFrame() + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + **Credits:** + -------------- + The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.\n + + """ + + __startTime = float() + __endTime = float() + _MIS = str + __finalPatterns = {} + _iFile = " " + _oFile = " " + _sep = " " + __memoryUSS = float() + __memoryRSS = float() + __Database = [] + __mapSupport = {} + __lno = 0 + __tree = _Tree() + __rank = {} + __rankDup = {} + + def __init__(self, iFile, MIS, sep='\t') -> None: + super().__init__(iFile, MIS, sep) + + def __creatingItemSets(self) -> None: + """ + Storing the complete transactions of the database/input file in a database variable + :return: None + """ + self.__Database = [] + if isinstance(self._iFile, _fp._pd.DataFrame): + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'Transactions' in i: + self.__Database = self._iFile['Transactions'].tolist() + + # print(self.Database) + if isinstance(self._iFile, str): + if _fp._validators.url(self._iFile): + data = _fp._urlopen(self._iFile) + for line in data: + line = line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self.__Database.append(temp) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line = line.strip() + temp = [i.rstrip() for i in line.split('\t')] + temp = [x for x in temp if x] + # print(temp) + self.__Database.append(temp) + except IOError: + print("File Not Found") + quit() + + def _getMISValues(self) -> None: + """ + Storing the Minimum supports given by the user for each item in the database + :reurtn: None + """ + self._MISValues = {} + if isinstance(self._MIS, _fp._pd.DataFrame): + items, MIS = [], [] + if self._MIS.empty: + print("its empty..") + i = self._MIS.columns.values.tolist() + if 'items' in i: + items = self._MIS['items'].tolist() + if 'MIS' in i: + MIS = self._MIS['MIS'].tolist() + for i in range(len(items)): + self._MISValues[items[i]] = MIS[i] + + if isinstance(self._MIS, str): + if _fp._validators.url(self._MIS): + data = _fp._urlopen(self._MIS) + for line in data: + line = line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._MISValues[temp[0]] = int(temp[1]) + else: + try: + with open(self._MIS, 'r', encoding='utf-8') as f: + for line in f: + line = line.strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._MISValues[temp[0]] = int(temp[1]) + print(len(self._MISValues)) + except IOError: + print("File Not Found") + quit() + + def __convert(self, value) -> float: + """ + to convert the type of user specified minSup value + + :param value: user specified minSup value + :return: converted type + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self.__Database) * value) + if type(value) is str: + if '.' 
in value: + value = float(value) + value = (len(self.__Database) * value) + else: + value = int(value) + return value + + def __frequentOneItem(self) -> List[str]: + """ + Generating One frequent items sets + """ + self.__mapSupport = {} + for tr in self.__Database: + for i in range(len(tr)): + if tr[i] not in self.__mapSupport: + self.__mapSupport[tr[i]] = 1 + else: + self.__mapSupport[tr[i]] += 1 + # for x, y in self.__mapSupport.items(): + # print(x, y) + self.__mapSupport = {k: v for k, v in self.__mapSupport.items() if v >= min(self._MISValues.values())} + # for x, y in self.__mapSupport.items(): + # print(x, y) + genList = [k for k, v in sorted(self.__mapSupport.items(), key=lambda x: x[1], reverse=True)] + self.__rank = dict([(index, item) for (item, index) in enumerate(genList)]) + return genList + + def __updateTransactions(self, itemSet) -> List[List[int]]: + """ + Updates the items in transactions with rank of items according to their support + + :Example: oneLength = {'a':7, 'b': 5, 'c':'4', 'd':3} + rank = {'a':0, 'b':1, 'c':2, 'd':3} + + :param itemSet: list of one-frequent items + """ + list1 = [] + for tr in self.__Database: + list2 = [] + for i in range(len(tr)): + if tr[i] in itemSet: + list2.append(self.__rank[tr[i]]) + if len(list2) >= 1: + list2.sort() + list1.append(list2) + return list1 + + @staticmethod + def __buildTree(transactions, info) -> _Tree: + """ + Builds the tree with updated transactions + + :param transactions: updated transactions + :param info: support details of each item in transactions. + :return: transactions compressed in fp-tree + + """ + rootNode = _Tree() + rootNode.info = info.copy() + for i in range(len(transactions)): + rootNode.addTransaction(transactions[i], 1) + return rootNode + + def __savePeriodic(self, itemSet) -> str: + """ + The duplication items and their ranks + + :param itemSet: frequent itemSet that generated + :return: patterns with original item names. + """ + temp = str() + for i in itemSet: + temp = temp + self.__rankDup[i] + "\t" + return temp + +
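Internally the miner replaces item names by dense integer ranks ordered by descending support before building the tree (__frequentOneItem and __updateTransactions above), and __rankDup restores the original names when patterns are written out (__savePeriodic). A compact sketch of that remapping on toy data:

.. code-block:: python

    support = {'a': 7, 'b': 5, 'c': 4, 'd': 3}  # toy item supports

    genList = sorted(support, key=support.get, reverse=True)
    rank = {item: idx for idx, item in enumerate(genList)}  # {'a': 0, ...}
    rankDup = {idx: item for item, idx in rank.items()}     # inverse map

    transaction = ['b', 'd', 'a']
    encoded = sorted(rank[i] for i in transaction)
    print(encoded)                        # [0, 1, 3]
    print([rankDup[i] for i in encoded])  # ['a', 'b', 'd']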
+[docs] + @deprecated("It is recommended to use mine() instead of startMine() for mining process") + def startMine(self) -> None: + """ + main program to start the operation + :return: none + + """ + self.mine()
+ + +
+[docs]
+    def mine(self) -> None:
+        """
+        Main program to start the operation. Note: startMine() delegates to
+        this method, so it must be named mine(), not Mine().
+        :return: None
+        """
+        global _MIS
+        self.__startTime = _fp._time.time()
+        if self._iFile is None:
+            raise Exception("Please enter the file path or file name:")
+        self.__creatingItemSets()
+        self._getMISValues()
+        itemSet = self.__frequentOneItem()
+        updatedTransactions = self.__updateTransactions(itemSet)
+        for x, y in self.__rank.items():
+            _MIS[y] = self._MISValues[x]
+            self.__rankDup[y] = x
+        info = {self.__rank[k]: v for k, v in self.__mapSupport.items()}
+        __Tree = self.__buildTree(updatedTransactions, info)
+        patterns = __Tree.generatePatterns([])
+        self.__finalPatterns = {}
+        for k in patterns:
+            s = self.__savePeriodic(k[0])
+            self.__finalPatterns[str(s)] = k[1]
+        print("Frequent patterns were generated successfully using CFPGrowth algorithm")
+        self.__endTime = _fp._time.time()
+        process = _fp._psutil.Process(_fp._os.getpid())
+        self.__memoryUSS = process.memory_full_info().uss
+        self.__memoryRSS = process.memory_info().rss
+ + +
+[docs] + def getMemoryUSS(self) -> float: + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self.__memoryUSS
+ + +
+[docs] + def getMemoryRSS(self) -> float: + """Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self.__memoryRSS
+ + +
+[docs] + def getRuntime(self) -> float: + """Calculating the total amount of runtime taken by the mining process + + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self.__endTime - self.__startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self) -> pd.DataFrame: + """Storing final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataframe = {} + data = [] + for a, b in self.__finalPatterns.items(): + data.append([a.replace('\t', ' '), b]) + dataframe = _fp._pd.DataFrame(data, columns=['Patterns', 'Support']) + return dataframe
+ + +
+[docs]
+    def save(self, outFile: str) -> None:
+        """Complete set of frequent patterns will be written to an output file
+
+        :param outFile: name of the output file
+        :type outFile: str
+        :return: None
+        """
+        self._oFile = outFile
+        # Use a context manager so the file handle is closed after writing
+        with open(self._oFile, 'w+') as writer:
+            for x, y in self.__finalPatterns.items():
+                s1 = x.strip() + ":" + str(y)
+                writer.write("%s \n" % s1)
+ + +
+[docs] + def getPatterns(self) -> Dict[str, int]: + """ Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + :rtype: dict + """ + return self.__finalPatterns
+ + +
+[docs]
+    def printResults(self) -> None:
+        """
+        This function is used to print the results
+        :return: None
+        """
+        print("Total number of Frequent Patterns:", len(self.getPatterns()))
+        print("Total Memory in USS:", self.getMemoryUSS())
+        print("Total Memory in RSS:", self.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_fp._sys.argv) == 4 or len(_fp._sys.argv) == 5:
+        if len(_fp._sys.argv) == 5:
+            _ap = CFPGrowth(_fp._sys.argv[1], _fp._sys.argv[3], _fp._sys.argv[4])
+        if len(_fp._sys.argv) == 4:
+            _ap = CFPGrowth(_fp._sys.argv[1], _fp._sys.argv[3])
+        _ap.mine()
+        print("Total number of Frequent Patterns:", len(_ap.getPatterns()))
+        _ap.save(_fp._sys.argv[2])
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS:", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        print("Error! The number of input parameters does not match the total number of parameters provided")
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/multipleMinimumSupportBasedFrequentPattern/basic/CFPGrowthPlus.html b/sphinx/_build/html/_modules/PAMI/multipleMinimumSupportBasedFrequentPattern/basic/CFPGrowthPlus.html new file mode 100644 index 000000000..62cad4bc9 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/multipleMinimumSupportBasedFrequentPattern/basic/CFPGrowthPlus.html @@ -0,0 +1,763 @@ + + + + + + PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowthPlus — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
+
+
+
+
+ +

Source code for PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowthPlus

+#  Copyright (C)  2021 Rage Uday Kiran
+#
+#
+#      This program is free software: you can redistribute it and/or modify
+#      it under the terms of the GNU General Public License as published by
+#      the Free Software Foundation, either version 3 of the License, or
+#      (at your option) any later version.
+#
+#      This program is distributed in the hope that it will be useful,
+#      but WITHOUT ANY WARRANTY; without even the implied warranty of
+#      MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#      GNU General Public License for more details.
+#
+#      You should have received a copy of the GNU General Public License
+#      along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+from PAMI.multipleMinimumSupportBasedFrequentPattern.basic import abstract as _fp
+from deprecated import deprecated
+
+_fp._sys.setrecursionlimit(20000)
+MIS = {}
+
+class _Node:
+    """
+    A class used to represent the node of frequentPatternTree
+
+    :Attributes:
+
+        itemId: int
+            storing item of a node
+        counter: int
+            To maintain the support of node
+        parent: node
+            To maintain the parent of node
+        children: dict
+            To maintain the children of the node, keyed by itemId
+
+    :Methods:
+
+        addChild(node)
+            Updates the nodes children list and parent for the given node
+
+    """
+
+    def __init__(self, item, children):
+        self.itemId = item
+        self.counter = 1
+        self.parent = None
+        self.children = children
+
+    def addChild(self, node):
+        """
+        Retrieving the child from the tree
+
+        :param node: Children node.
+        :type node: Node
+        :return: Updates the children nodes and parent nodes
+        """
+        self.children[node.itemId] = node
+        node.parent = self
+
+
+class _Tree:
+    """
+    A class used to represent the frequentPatternGrowth tree structure
+
+    :Attributes:
+
+        root : Node
+            The first node of the tree set to Null.
+        summaries : dictionary
+            Stores the nodes itemId which shares same itemId
+        info : dictionary
+            frequency of items in the transactions
+
+    :Methods:
+
+        addTransaction(transaction, freq)
+            adding items of  transactions into the tree as nodes and freq is the count of nodes
+        getFinalConditionalPatterns(node)
+            getting the conditional patterns from fp-tree for a node
+        getConditionalPatterns(patterns, frequencies)
+            sort the patterns by removing the items with lower minSup
+        generatePatterns(prefix)
+            generating the patterns from fp-tree
+    """
+
+    def __init__(self):
+        self.root = _Node(None, {})
+        self.summaries = {}
+        self.info = {}
+
+    def addTransaction(self, transaction, count):
+        """
+        adding transaction into tree
+
+        :param transaction: it represents the one transactions in database
+        :type transaction: list
+        :param count: frequency of item
+        :type count: int
+        """
+
+        # This method takes transaction as input and returns the tree
+        currentNode = self.root
+        for i in range(len(transaction)):
+            if transaction[i] not in currentNode.children:
+                newNode = _Node(transaction[i], {})
+                newNode.freq = count
+                currentNode.addChild(newNode)
+                if transaction[i] in self.summaries:
+                    self.summaries[transaction[i]].append(newNode)
+                else:
+                    self.summaries[transaction[i]] = [newNode]
+                currentNode = newNode
+            else:
+                currentNode = currentNode.children[transaction[i]]
+                currentNode.freq += count
+
+    def getFinalConditionalPatterns(self, alpha, support):
+        """
+        generates the conditional patterns for a node
+
+        :param alpha: node to generate conditional patterns for
+        :param support: support of the suffix node, used to prune items from the conditional patterns
+        :return: returns conditional patterns, frequency of each item in conditional patterns
+
+        """
+        finalPatterns = []
+        finalFreq = []
+        for i in self.summaries[alpha]:
+            set1 = i.freq
+            set2 = []
+            while i.parent.itemId is not None:
+                set2.append(i.parent.itemId)
+                i = i.parent
+            if len(set2) > 0:
+                set2.reverse()
+                finalPatterns.append(set2)
+                finalFreq.append(set1)
+        finalPatterns, finalFreq, info = self.getConditionalTransactions(finalPatterns, finalFreq, support)
+        return finalPatterns, finalFreq, info
+
+    @staticmethod
+    def getConditionalTransactions(ConditionalPatterns, conditionalFreq, support):
+        """
+        To calculate the frequency of items in conditional patterns and sorting the patterns
+
+        :param ConditionalPatterns: paths of a node
+        :param conditionalFreq: frequency of each item in the path
+        :param support: minimum count an item must reach to be kept in the conditional patterns
+        :return: conditional patterns and frequency of each item in transactions
+        """
+        #global _minSup
+        pat = []
+        freq = []
+        data1 = {}
+        for i in range(len(ConditionalPatterns)):
+            for j in ConditionalPatterns[i]:
+                if j in data1:
+                    data1[j] += conditionalFreq[i]
+                else:
+                    data1[j] = conditionalFreq[i]
+        up_dict = {k: v for k, v in data1.items() if v >= support}
+        #up_dict = data1.copy()
+        count = 0
+        for p in ConditionalPatterns:
+            p1 = [v for v in p if v in up_dict]
+            trans = sorted(p1, key=lambda x: (up_dict.get(x), -x), reverse=True)
+            if len(trans) > 0:
+                pat.append(trans)
+                freq.append(conditionalFreq[count])
+            count += 1
+        return pat, freq, up_dict
+
+    def generatePatterns(self, prefix):
+        """
+        To generate the frequent patterns
+
+        :param prefix: an empty list
+        :return: Frequent patterns that are extracted from fp-tree
+
+        """
+        global minMIS
+        for i in sorted(self.summaries, key=lambda x: (self.info.get(x), -x)):
+            pattern = prefix[:]
+            pattern.append(i)
+            if self.info[i] >= minMIS:
+              yield pattern, self.info[i]
+            patterns, freq, info = self.getFinalConditionalPatterns(i, self.info[i])
+            conditionalTree = _Tree()
+            conditionalTree.info = info.copy()
+            for pat in range(len(patterns)):
+                conditionalTree.addTransaction(patterns[pat], freq[pat])
+            if len(patterns) > 0:
+                for q in conditionalTree.generatePatterns(pattern):
+                    yield q
+
+minMIS = 0
+
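The practical difference from CFPGrowth shows up in getConditionalTransactions above: CFPGrowthPlus prunes conditional items whose accumulated count falls below the support passed down from the suffix node, the search-space reduction described in the Kiran & Reddy (EDBT 2011) reference cited below, whereas CFPGrowth keeps every item. A minimal sketch of that filter with invented counts:

.. code-block:: python

    counts = {'a': 6, 'b': 2, 'c': 4}  # toy counts in the conditional patterns
    support = 3                        # support handed down from the suffix

    # CFPGrowthPlus keeps only items that are still viable for extension;
    # CFPGrowth (up_dict = data1.copy()) would keep all three.
    up_dict = {k: v for k, v in counts.items() if v >= support}
    print(up_dict)  # {'a': 6, 'c': 4}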
+
+[docs] +class CFPGrowthPlus(_fp._frequentPatterns): + """ + + :Description: + + :Reference: R. Uday Kiran P. Krishna Reddy Novel techniques to reduce search space in multiple minimum supports-based frequent + pattern mining algorithms. 11-20 2011 EDBT https://doi.org/10.1145/1951365.1951370 + + :param iFile: str : + Name of the Input file to mine complete set of Uncertain Multiple Minimum Support Based Frequent patterns + :param oFile: str : + Name of the output file to store complete set of Uncertain Minimum Support Based Frequent patterns + :param minSup: str: + minimum support thresholds were tuned to find the appropriate ranges in the limited memory + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + + + :Attributes: + + iFile : file + Input file name or path of the input file + MIS: file or dictionary + Multiple minimum supports of all items in the database + sep : str + This variable is used to distinguish items from one another in a transaction. The default separator is tab space or \t. + However, the users can override their default separator. + oFile : file + Name of the output file or the path of the output file + startTime:float + To record the start time of the mining process + endTime:float + To record the completion time of the mining process + memoryUSS : float + To store the total amount of USS memory consumed by the program + memoryRSS : float + To store the total amount of RSS memory consumed by the program + Database : list + To store the transactions of a database in list + mapSupport : Dictionary + To maintain the information of item and their frequency + lno : int + it represents the total no of transactions + tree : class + it represents the Tree class + finalPatterns : dict + it represents to store the patterns + + + :Methods: + + startMine() + Mining process will start from here + getPatterns() + Complete set of patterns will be retrieved with this function + savePatterns(oFile) + Complete set of frequent patterns will be loaded in to a output file + getPatternsAsDataFrame() + Complete set of frequent patterns will be loaded in to a dataframe + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function + creatingItemSets() + Scans the dataset or dataframes and stores in list format + frequentOneItem() + Extracts the one-frequent patterns from transactions + + **Executing the code on terminal:** + ------------------------------------ + .. code-block:: console + + + Format: + + (.venv) $ python3 CFPGrowthPlus.py <inputFile> <outputFile> + + Examples: + + (.venv) $ python3 CFPGrowthPlus.py sampleDB.txt patterns.txt MISFile.txt + + + .. note:: minSup will be considered in support count or frequency + + + **Sample run of the importing code:** + ---------------------------------------- + .. 
code-block:: python + + from PAMI.multipleMinimumSupportBasedFrequentPattern.basic import CFPGrowthPlus as alg + + obj = alg.CFPGrowthPlus(iFile, mIS) + + obj.startMine() + + frequentPatterns = obj.getPatterns() + + print("Total number of Frequent Patterns:", len(frequentPatterns)) + + obj.savePatterns(oFile) + + Df = obj.getPatternInDataFrame() + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + **Credits:** + --------------- + The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.\n + + """ + + __startTime = float() + __endTime = float() + _MIS = str + __finalPatterns = {} + _iFile = " " + _oFile = " " + _sep = " " + __memoryUSS = float() + __memoryRSS = float() + __Database = [] + __mapSupport = {} + __lno = 0 + __tree = _Tree() + __rank = {} + __rankDup = {} + + def __init__(self, iFile, MIS, sep='\t'): + super().__init__(iFile, MIS, sep) + + def __creatingItemSets(self): + """ + Storing the complete transactions of the database/input file in a database variable + + """ + self.__Database = [] + if isinstance(self._iFile, _fp._pd.DataFrame): + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'Transactions' in i: + self.__Database = self._iFile['Transactions'].tolist() + + # print(self.Database) + if isinstance(self._iFile, str): + if _fp._validators.url(self._iFile): + data = _fp._urlopen(self._iFile) + for line in data: + line = line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self.__Database.append(temp) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line = line.strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self.__Database.append(temp) + except IOError: + print("File Not Found") + quit() + + def _getMISValues(self): + """ + Storing the Minimum supports given by the user for each item in the database + + """ + self._MISValues = {} + if isinstance(self._MIS, _fp._pd.DataFrame): + items, MIS = [], [] + if self._MIS.empty: + print("its empty..") + i = self._MIS.columns.values.tolist() + if 'items' in i: + items = self._MIS['items'].tolist() + if 'MIS' in i: + MIS = self._MIS['MIS'].tolist() + for i in range(len(items)): + self._MISValues[items[i]] = MIS[i] + + if isinstance(self._MIS, str): + if _fp._validators.url(self._MIS): + data = _fp._urlopen(self._MIS) + for line in data: + line = line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._MISValues[temp[0]] = int(temp[1]) + else: + try: + with open(self._MIS, 'r', encoding='utf-8') as f: + for line in f: + line = line.strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._MISValues[temp[0]] = int(temp[1]) + except IOError: + print("File Not Found") + quit() + + def __convert(self, value): + """ + to convert the type of user specified minSup value + + :param value: user specified minSup value + :return: converted type + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self.__Database) * value) + if type(value) is str: + if '.' 
in value: + value = float(value) + value = (len(self.__Database) * value) + else: + value = int(value) + return value + + def __frequentOneItem(self): + """ + Generating One frequent items sets + """ + global minMIS + self.__mapSupport = {} + for tr in self.__Database: + for i in range(1, len(tr)): + if tr[i] not in self.__mapSupport: + self.__mapSupport[tr[i]] = 1 + else: + self.__mapSupport[tr[i]] += 1 + self.__mapSupport = {k: v for k, v in self.__mapSupport.items() if v >= min(self._MISValues.values())} + minMIS = min(self._MISValues.values()) + genList = [k for k, v in sorted(self.__mapSupport.items(), key=lambda x: x[1], reverse=True)] + self.__rank = dict([(index, item) for (item, index) in enumerate(genList)]) + return genList + + def __updateTransactions(self, itemSet): + """ + Updates the items in transactions with rank of items according to their support + + :Example: oneLength = {'a':7, 'b': 5, 'c':'4', 'd':3} + rank = {'a':0, 'b':1, 'c':2, 'd':3} + + :param itemSet: list of one-frequent items + + """ + list1 = [] + for tr in self.__Database: + list2 = [] + for i in range(len(tr)): + if tr[i] in itemSet: + list2.append(self.__rank[tr[i]]) + if len(list2) >= 1: + list2.sort() + list1.append(list2) + return list1 + + @staticmethod + def __buildTree(transactions, info): + """ + Builds the tree with updated transactions + + :param transactions: updated transactions + :param info: support details of each item in transactions. + :return: transactions compressed in fp-tree + """ + rootNode = _Tree() + rootNode.info = info.copy() + for i in range(len(transactions)): + rootNode.addTransaction(transactions[i], 1) + return rootNode + + def __savePeriodic(self, itemSet): + """ + The duplication items and their ranks + + :param itemSet: frequent itemSet that generated + :return: patterns with original item names. + + """ + temp = str() + for i in itemSet: + temp = temp + self.__rankDup[i] + " " + return temp + +
+[docs] + @deprecated("It is recommended to use mine() instead of startMine() for mining process") + def startMine(self): + """ + main program to start the operation + + """ + self.mine()
+ + +
+[docs]
+    def mine(self):
+        """
+        Main program to start the operation
+
+        """
+        global MIS
+        # item-rank -> MIS map consumed while generating patterns; reset for each run
+        MIS = {}
+        self.__startTime = _fp._time.time()
+        if self._iFile is None:
+            raise Exception("Please enter the file path or file name:")
+        self.__creatingItemSets()
+        self._getMISValues()
+        itemSet = self.__frequentOneItem()
+        updatedTransactions = self.__updateTransactions(itemSet)
+        for x, y in self.__rank.items():
+            MIS[y] = self._MISValues[x]
+            self.__rankDup[y] = x
+        info = {self.__rank[k]: v for k, v in self.__mapSupport.items()}
+        tree = self.__buildTree(updatedTransactions, info)
+        patterns = tree.generatePatterns([])
+        self.__finalPatterns = {}
+        for k in patterns:
+            s = self.__savePeriodic(k[0])
+            self.__finalPatterns[str(s)] = k[1]
+        print("Frequent patterns were generated successfully using CFPGrowthPlus algorithm")
+        self.__endTime = _fp._time.time()
+        process = _fp._psutil.Process(_fp._os.getpid())
+        self.__memoryUSS = process.memory_full_info().uss
+        self.__memoryRSS = process.memory_info().rss
+ + + +
+[docs] + def getMemoryUSS(self): + """Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self.__memoryUSS
+ + +
+[docs] + def getMemoryRSS(self): + """Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self.__memoryRSS
+ + +
+[docs] + def getRuntime(self): + """Calculating the total amount of runtime taken by the mining process + + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self.__endTime - self.__startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self): + """Storing final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataframe = {} + data = [] + for a, b in self.__finalPatterns.items(): + data.append([a, b]) + dataframe = _fp._pd.DataFrame(data, columns=['Patterns', 'Support']) + return dataframe
+ + +
+[docs]
+    def save(self, outFile):
+        """Complete set of frequent patterns will be written to an output file
+
+        :param outFile: name of the output file
+        :type outFile: file
+        """
+        self._oFile = outFile
+        with open(self._oFile, 'w+') as writer:
+            for x, y in self.__finalPatterns.items():
+                s1 = x + ":" + str(y)
+                writer.write("%s \n" % s1)
+ + +
+[docs] + def getPatterns(self): + """ Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + :rtype: dict + """ + return self.__finalPatterns
+ + +
+[docs]
+    def printResults(self) -> None:
+        """
+        This function is used to print the results
+        :return: None
+        """
+        print("Total number of Frequent Patterns:", len(self.getPatterns()))
+        print("Total Memory in USS:", self.getMemoryUSS())
+        print("Total Memory in RSS:", self.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_fp._sys.argv) == 4 or len(_fp._sys.argv) == 5:
+        if len(_fp._sys.argv) == 5:
+            _ap = CFPGrowthPlus(_fp._sys.argv[1], _fp._sys.argv[3], _fp._sys.argv[4])
+        if len(_fp._sys.argv) == 4:
+            _ap = CFPGrowthPlus(_fp._sys.argv[1], _fp._sys.argv[3])
+        _ap.startMine()
+        _Patterns = _ap.getPatterns()
+        print("Total number of Frequent Patterns:", len(_Patterns))
+        _ap.save(_fp._sys.argv[2])
+        _memUSS = _ap.getMemoryUSS()
+        print("Total Memory in USS:", _memUSS)
+        _memRSS = _ap.getMemoryRSS()
+        print("Total Memory in RSS:", _memRSS)
+        _run = _ap.getRuntime()
+        print("Total ExecutionTime in seconds:", _run)
+    else:
+        print("Error! The number of input parameters does not match the total number of parameters provided")
+
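+
+# A minimal sketch (hypothetical file names) of preparing the two inputs that
+# CFPGrowthPlus expects: a transactional database and a MIS file holding one
+# "<item><sep><MIS>" pair per line, exactly as parsed by _getMISValues() above:
+#
+#     with open('MIS.txt', 'w') as f:
+#         for item, mis in {'a': 3, 'b': 3, 'c': 2}.items():
+#             f.write(f"{item}\t{mis}\n")
+#
+#     obj = CFPGrowthPlus('transactions.txt', 'MIS.txt', sep='\t')
+#     obj.mine()
+#     print(obj.getPatterns())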
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/partialPeriodicFrequentPattern/basic/abstract.html b/sphinx/_build/html/_modules/PAMI/partialPeriodicFrequentPattern/basic/abstract.html new file mode 100644 index 000000000..bd5c7cbec --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/partialPeriodicFrequentPattern/basic/abstract.html @@ -0,0 +1,309 @@ + + + + + + PAMI.partialPeriodicFrequentPattern.basic.abstract — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for PAMI.partialPeriodicFrequentPattern.basic.abstract

+from abc import ABC, abstractmethod
+import time
+import math
+import csv
+import pandas as pd
+from collections import defaultdict
+from itertools import combinations as c
+import os
+import os.path
+import psutil
+import sys
+import validators
+from urllib.request import urlopen
+
+
+
+[docs]
+class partialPeriodicPatterns(ABC):
+    """
+    :Description: This abstract base class defines the variables and methods that every partial periodic pattern mining algorithm must
+                  implement in PAMI
+
+    :Attributes:
+
+        iFile : str
+            Input file name or path of the input file
+        minSup: float
+            User-specified minimum support value. It has to be given in terms of count of total number of transactions
+            in the input database/file
+        startTime: float
+            To record the start time of the algorithm
+        endTime: float
+            To record the completion time of the algorithm
+        finalPatterns: dict
+            Storing the complete set of patterns in a dictionary variable
+        oFile : str
+            Name of the output file to store the complete set of frequent patterns
+        memoryUSS : float
+            To store the total amount of USS memory consumed by the program
+        memoryRSS : float
+            To store the total amount of RSS memory consumed by the program
+
+    :Methods:
+
+        startMine()
+            Mining process will start from here
+        getPatterns()
+            Complete set of patterns will be retrieved with this function
+        save(oFile)
+            Complete set of frequent patterns will be written into an output file
+        getPatternsAsDataFrame()
+            Complete set of frequent patterns will be loaded into a data frame
+        getMemoryUSS()
+            Total amount of USS memory consumed by the program will be retrieved from this function
+        getMemoryRSS()
+            Total amount of RSS memory consumed by the program will be retrieved from this function
+        getRuntime()
+            Total amount of runtime taken by the program will be retrieved from this function
+    """
+
+    def __init__(self, iFile, minSup, maxPer, minPR, sep='\t'):
+        """
+        :param iFile: Input file name or path of the input file
+        :type iFile: str
+        :param minSup: User-specified minimum support value. It has to be given in terms of count of total number of
+                       transactions in the input database/file
+        :type minSup: float/int
+        """
+
+        self._partialPeriodicPatterns__iFile = iFile
+        self._partialPeriodicPatterns__minSup = minSup
+        self._partialPeriodicPatterns__maxPer = maxPer
+        self._partialPeriodicPatterns__minPR = minPR
+        self._partialPeriodicPatterns__sep = sep
+
+    @abstractmethod
+    def __iFile(self):
+        """Variable to store the input file path/file name"""
+
+        pass
+
+    @abstractmethod
+    def __minSup(self):
+        """Variable to store the user-specified minimum support value"""
+
+        pass
+
+    @abstractmethod
+    def __maxPer(self):
+        """Variable to store the user-specified maximum periodicity value"""
+
+        pass
+
+    @abstractmethod
+    def __sep(self):
+        """Variable to store the separator that distinguishes items in a transaction"""
+
+        pass
+
+    @abstractmethod
+    def __startTime(self):
+        """Variable to store the start time of the mining process"""
+
+        pass
+
+    @abstractmethod
+    def __endTime(self):
+        """Variable to store the end time of the complete program"""
+
+        pass
+
+    @abstractmethod
+    def __memoryUSS(self):
+        """Variable to store the total USS memory consumed by the program"""
+
+        pass
+
+    @abstractmethod
+    def __memoryRSS(self):
+        """Variable to store the total RSS memory consumed by the program"""
+
+        pass
+
+    @abstractmethod
+    def __finalPatterns(self):
+        """Variable to store the complete set of patterns in a dictionary"""
+
+        pass
+
+    @abstractmethod
+    def __oFile(self):
+        """Variable to store the name of the output file to store the complete set of frequent patterns"""
+
+        pass
+
+[docs] + @abstractmethod + def startMine(self): + """Code for the mining process will start from this function""" + + pass
+ + +
+[docs] + @abstractmethod + def getPatterns(self): + """Complete set of frequent patterns generated will be retrieved from this function""" + + pass
+ + +
+[docs]
+    @abstractmethod
+    def save(self, oFile):
+        """Complete set of frequent patterns will be saved into an output file from this function
+
+        :param oFile: Name of the output file
+        :type oFile: csv file
+        """
+
+        pass
+ + +
+[docs]
+    @abstractmethod
+    def getPatternsAsDataFrame(self):
+        """Complete set of frequent patterns will be loaded into a data frame from this function"""
+
+        pass
+ + +
+[docs] + @abstractmethod + def getMemoryUSS(self): + """Total amount of USS memory consumed by the program will be retrieved from this function""" + + pass
+ + +
+[docs]
+    @abstractmethod
+    def getMemoryRSS(self):
+        """Total amount of RSS memory consumed by the program will be retrieved from this function"""
+
+        pass
+ + +
+[docs] + @abstractmethod + def getRuntime(self): + """Total amount of runtime taken by the program will be retrieved from this function""" + + pass
+ + +
+[docs] + @abstractmethod + def printResults(self): + """ To print all the results of execution. """ + + pass
+
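+
+# Sketch of the contract defined above (illustrative only; ToyMiner and the
+# file names are hypothetical): a concrete miner implements the declared
+# methods, so callers can drive every algorithm in this family the same way:
+#
+#     obj = ToyMiner('sample.txt', minSup=3, maxPer=2, minPR=0.5)
+#     obj.startMine()
+#     obj.printResults()
+#     obj.save('patterns.txt')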
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/partialPeriodicPattern/basic/PPPGrowth.html b/sphinx/_build/html/_modules/PAMI/partialPeriodicPattern/basic/PPPGrowth.html new file mode 100644 index 000000000..1465c07ac --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/partialPeriodicPattern/basic/PPPGrowth.html @@ -0,0 +1,842 @@ + + + + + + PAMI.partialPeriodicPattern.basic.PPPGrowth — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for PAMI.partialPeriodicPattern.basic.PPPGrowth

+# PPPGrowth is a fundamental approach for mining partial periodic patterns in temporal databases.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#
+#         from PAMI.partialPeriodicPattern.basic import PPPGrowth as alg
+#
+#         obj = alg.PPPGrowth(iFile, minPS, period)
+#
+#         obj.mine()
+#
+#         partialPeriodicPatterns = obj.getPatterns()
+#
+#         print("Total number of partial periodic Patterns:", len(partialPeriodicPatterns))
+#
+#         obj.save(oFile)
+#
+#         Df = obj.getPatternsAsDataFrame()
+#
+#         memUSS = obj.getMemoryUSS()
+#
+#         print("Total Memory in USS:", memUSS)
+#
+#         memRSS = obj.getMemoryRSS()
+#
+#         print("Total Memory in RSS", memRSS)
+#
+#         run = obj.getRuntime()
+#
+#         print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+__copyright__ = """
+ Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+
+from PAMI.partialPeriodicPattern.basic import abstract as _abstract
+from typing import List, Dict, Tuple, Set, Union, Any, Iterable, Generator
+import validators as _validators
+from urllib.request import urlopen as _urlopen
+import sys as _sys
+import pandas as pd
+from deprecated import deprecated
+
+_minPS = float()
+_period = float()
+_lno = int()
+
+class _Node(object):
+    """
+    A class used to represent the node of frequentPatternTree
+
+    :Attributes:
+
+        item : int
+            storing item of a node
+        timeStamps : list
+            To maintain the timestamps of transaction at the end of the branch
+        parent : node
+            To maintain the parent of every node
+        children : list
+            To maintain the children of node
+
+    :Methods:
+
+        addChild(node)
+            stores the child node under its respective parent node
+    """
+
+    def __init__(self, item: int, children: Dict) -> None:
+        self.item = item
+        self.children = children
+        self.parent = None
+        self.timeStamps = []
+
+    def addChild(self, node: '_Node') -> None:
+        self.children[node.item] = node
+        node.parent = self
+
+
+class _Tree(object):
+    """
+    A class used to represent the frequentPatternGrowth tree structure
+
+    :Attributes:
+
+        root : Node
+            Represents the root node of the tree
+        summaries : dictionary
+            storing the nodes with same item name
+        info : dictionary
+            stores the support of items
+
+
+    :Methods:
+
+        addTransaction(transaction)
+            creating transaction as a branch in frequentPatternTree
+        getConditionalPatterns(Node)
+            generates the conditional patterns from tree for specific node
+        conditionalTransactions(prefixPaths,Support)
+            takes the prefixPath of a node and support at child of the path and extract the frequent items from
+            prefixPaths and generates prefixPaths with items which are frequent
+        remove(Node)
+            removes the node from tree once after generating all the patterns respective to the node
+        generatePatterns(Node)
+            starts from the root node of the tree and mines the frequent patterns
+
+    """
+
+    def __init__(self) -> None:
+        self.root = _Node(None, {})
+        self.summaries = {}
+        self.info = {}
+
+    def _addTransaction(self, transaction: List, tid: List) -> None:
+        """
+        adding transaction into tree
+
+        :param transaction : it represents the one transactions in database
+        :type transaction : list
+        :param tid : represents the timestamp of transaction
+        :type tid : list
+        """
+        currentNode = self.root
+        for i in range(len(transaction)):
+            if transaction[i] not in currentNode.children:
+                newNode = _Node(transaction[i], {})
+                currentNode.addChild(newNode)
+                if transaction[i] in self.summaries:
+                    self.summaries[transaction[i]].append(newNode)
+                else:
+                    self.summaries[transaction[i]] = [newNode]
+                currentNode = newNode
+            else:
+                currentNode = currentNode.children[transaction[i]]
+        currentNode.timeStamps = currentNode.timeStamps + tid
+
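+    # Illustrative note (added): transactions sharing a prefix reuse the same
+    # branch. Adding [1, 2] with tid [1] and then [1, 3] with tid [2] yields
+    # root -> 1 -> {2, 3}, and each tid list is stored at the last node of
+    # its transaction.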
+    def _getConditionalPatterns(self, alpha: '_Node') -> Tuple[List, List, Dict]:
+        """
+        generates all the conditional patterns of respective node
+
+        :param alpha : it represents the Node in tree
+        :type alpha : Node
+        :return: tuple
+        """
+        finalPatterns = []
+        finalSets = []
+        for i in self.summaries[alpha]:
+            set1 = i.timeStamps
+            set2 = []
+            while i.parent.item is not None:
+                set2.append(i.parent.item)
+                i = i.parent
+            if len(set2) > 0:
+                set2.reverse()
+                finalPatterns.append(set2)
+                finalSets.append(set1)
+        finalPatterns, finalSets, info = self._conditionalTransactions(finalPatterns, finalSets)
+        return finalPatterns, finalSets, info
+
+    def _generateTimeStamps(self, node: '_Node') -> List:
+        """
+        generates the Time Stamps
+
+        :param node : it represents a node in the tree
+        :type node : Node
+        """
+        finalTs = node.timeStamps
+        return finalTs
+
+    def _removeNode(self, nodeValue: int) -> None:
+        """
+        removing the node from tree
+
+        :param nodeValue : item of the node that has to be removed from the tree
+        :type nodeValue : int
+        :return: None
+        """
+        for i in self.summaries[nodeValue]:
+            i.parent.timeStamps = i.parent.timeStamps + i.timeStamps
+            del i.parent.children[nodeValue]
+
+    def _getTimeStamps(self, alpha: '_Node') -> List:
+        """
+        Returns the timeStamps of a node
+
+        :param alpha: node of tree
+        :return: timeStamps of a node
+
+        """
+        temporary = []
+        for i in self.summaries[alpha]:
+            temporary += i.timeStamps
+        return temporary
+
+    def _getPeriodicSupport(self, timeStamps: List) -> int:
+        """
+        calculates the periodic support of a pattern from its list of timestamps
+
+        :param timeStamps : timestamps of a pattern
+        :type timeStamps : list
+        :return: int
+        """
+        timeStamps.sort()
+        per = 0
+        # count consecutive timestamp pairs whose gap is within the period
+        for i in range(len(timeStamps) - 1):
+            if abs(timeStamps[i + 1] - timeStamps[i]) <= _period:
+                per += 1
+        return per
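+    # Worked example (illustrative): for timeStamps = [1, 3, 4, 9, 11] and
+    # _period = 2, the sorted consecutive gaps are 2, 1, 5 and 2; three gaps
+    # are <= 2, so the periodic support returned for the pattern is 3.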
+
+    def _conditionalTransactions(self, conditionalPatterns: List, conditionalTimeStamps: List) -> Tuple[List, List, Dict]:
+        """
+        It generates the conditional patterns with periodic frequent items
+
+        :param conditionalPatterns : conditional_patterns generated from condition_pattern method for
+                                respective node
+        :type conditionalPatterns : list
+        :param conditionalTimeStamps : represents the timestamps of conditional patterns of a node
+        :type conditionalTimeStamps : list
+
+        :return: tuple
+        """
+        global _minPS, _period
+        patterns = []
+        timeStamps = []
+        data1 = {}
+        for i in range(len(conditionalPatterns)):
+            for j in conditionalPatterns[i]:
+                if j in data1:
+                    data1[j] = data1[j] + conditionalTimeStamps[i]
+                else:
+                    data1[j] = conditionalTimeStamps[i]
+        updatedDictionary = {}
+        for m in data1:
+            updatedDictionary[m] = self._getPeriodicSupport(data1[m])
+        updatedDictionary = {k: v for k, v in updatedDictionary.items() if v >= _minPS}
+        count = 0
+        for p in conditionalPatterns:
+            p1 = [v for v in p if v in updatedDictionary]
+            trans = sorted(p1, key=lambda x: (updatedDictionary.get(x), -x), reverse=True)
+            if len(trans) > 0:
+                patterns.append(trans)
+                timeStamps.append(conditionalTimeStamps[count])
+            count += 1
+        return patterns, timeStamps, updatedDictionary
+
+    def _generatePatterns(self, prefix: List) -> Iterable[Tuple[List, int]]:
+        """
+        generates the patterns
+
+        :param prefix : forms the combination of items
+        :type prefix : list
+        :return : list
+        """
+        for i in sorted(self.summaries, key=lambda x: (self.info.get(x), -x)):
+            pattern = prefix[:]
+            pattern.append(i)
+            yield pattern, self.info[i]
+            patterns, timeStamps, info = self._getConditionalPatterns(i)
+            conditionalTree = _Tree()
+            conditionalTree.info = info.copy()
+            for pat in range(len(patterns)):
+                conditionalTree._addTransaction(patterns[pat], timeStamps[pat])
+            if len(patterns) > 0:
+                for q in conditionalTree._generatePatterns(pattern):
+                    yield q
+            self._removeNode(i)
+
+
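+# A minimal usage sketch of the tree machinery above (hypothetical data, kept
+# as a comment so the module is unchanged): timestamps ride along with each
+# transaction and drive the periodic-support counts of the mined patterns.
+#
+#     _minPS, _period = 1, 2
+#     tree = _Tree()
+#     tree.info = {1: 2, 2: 1}
+#     tree._addTransaction([1, 2], [1])
+#     tree._addTransaction([1, 2], [3])
+#     for pattern, periodicSupport in tree._generatePatterns([]):
+#         print(pattern, periodicSupport)
+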
+
+[docs] +class PPPGrowth(_abstract._partialPeriodicPatterns): + """ + :Description: 3pgrowth is fundamental approach to mine the partial periodic patterns in temporal database. + + :Reference: Discovering Partial Periodic Itemsets in Temporal Databases,SSDBM '17: Proceedings of the 29th International Conference on Scientific and Statistical Database ManagementJune 2017 + Article No.: 30 Pages 1–6https://doi.org/10.1145/3085504.3085535 + + :param iFile: str : + Name of the Input file to mine complete set of frequent pattern's + :param oFile: str : + Name of the output file to store complete set of frequent patterns + :param minPS: float: + Minimum partial periodic pattern... + :param period: float: + Minimum partial periodic... + + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + :Attributes: + + iFile : file + Name of the Input file or path of the input file + oFile : file + Name of the output file or path of the output file + minPS: float or int or str + The user can specify minPS either in count or proportion of database size. + If the program detects the data type of minPS is integer, then it treats minPS is expressed in count. + Otherwise, it will be treated as float. + Example: minPS=10 will be treated as integer, while minPS=10.0 will be treated as float + period: float or int or str + The user can specify period either in count or proportion of database size. + If the program detects the data type of period is integer, then it treats period is expressed in count. + Otherwise, it will be treated as float. + Example: period=10 will be treated as integer, while period=10.0 will be treated as float + sep : str + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space or \t. + However, the users can override their default separator. 
+ memoryUSS : float + To store the total amount of USS memory consumed by the program + memoryRSS : float + To store the total amount of RSS memory consumed by the program + startTime:float + To record the start time of the mining process + endTime:float + To record the completion time of the mining process + Database : list + To store the transactions of a database in list + mapSupport : Dictionary + To maintain the information of item and their frequency + lno : int + it represents the total no of transactions + tree : class + it represents the Tree class + finalPatterns : dict + it represents to store the patterns + + :Methods: + + startMine() + Mining process will start from here + getPatterns() + Complete set of patterns will be retrieved with this function + save(oFile) + Complete set of frequent patterns will be loaded in to a output file + getPatternsAsDataFrame() + Complete set of frequent patterns will be loaded in to a dataframe + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function + creatingItemSets() + Scans the dataset or dataframes and stores in list format + partialPeriodicOneItem() + Extracts the one-frequent patterns from transactions + updateTransactions() + updates the transactions by removing the aperiodic items and sort the transactions with items + by decreasing support + buildTree() + constrcuts the main tree by setting the root node as null + startMine() + main program to mine the partial periodic patterns + + **Executing the code on terminal:** + -------------------------------------- + .. code-block:: console + + + Format: + + (.venv) $python3 PPPGrowth.py <inputFile> <outputFile> <minPS> <period> + + Examples: + + (.venv) $ python3 PPPGrowth.py sampleDB.txt patterns.txt 10.0 2.0 + + + **Sample run of the importing code:** + ----------------------------------------- + .. 
code-block:: python + + from PAMI.periodicFrequentPattern.basic import PPPGrowth as alg + + obj = alg.PPPGrowth(iFile, minPS, period) + + obj.startMine() + + partialPeriodicPatterns = obj.getPatterns() + + print("Total number of partial periodic Patterns:", len(partialPeriodicPatterns)) + + obj.save(oFile) + + Df = obj.getPatternInDf() + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + + **Credits:** + ----------------- + The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.\n + + """ + _minPS = float() + _period = float() + _startTime = float() + _endTime = float() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _sep = " " + _memoryUSS = float() + _memoryRSS = float() + _Database = [] + _rank = {} + _rankdup = {} + _lno = 0 + + def _creatingItemSets(self) -> None: + """ + Storing the complete transactions of the database/input file in a database variable + :return: None + """ + self._Database = [] + if isinstance(self._iFile, _abstract._pd.DataFrame): + data, tids = [], [] + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'TS' in i: + tids = self._iFile['TS'].tolist() + if 'Transactions' in i: + data = self._iFile['Transactions'].tolist() + for i in range(len(data)): + tr = [tids[i][0]] + tr = tr + data[i] + self._Database.append(tr) + self._lno = len(self._Database) + # print(self.Database) + if isinstance(self._iFile, str): + if _validators.url(self._iFile): + data = _urlopen(self._iFile) + for line in data: + line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line.strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + except IOError: + print("File Not Found") + quit() + + def _partialPeriodicOneItem(self) -> Tuple[Dict, List]: + """ + calculates the support of each item in the dataset and assign the ranks to the items by decreasing support and returns the frequent items list + :return: tuple + """ + data = {} + self._period = self._convert(self._period) + self._minPS = self._convert(self._minPS) + for tr in self._Database: + for i in range(1, len(tr)): + if tr[i] not in data: + data[tr[i]] = [0, int(tr[0]), 1] + else: + lp = int(tr[0]) - data[tr[i]][1] + if lp <= self._period: + data[tr[i]][0] += 1 + data[tr[i]][1] = int(tr[0]) + data[tr[i]][2] += 1 + data = {k: v[0] for k, v in data.items() if v[0] >= self._minPS} + pfList = [k for k, v in sorted(data.items(), key=lambda x: x[1], reverse=True)] + self._rank = dict([(index, item) for (item, index) in enumerate(pfList)]) + return data, pfList + + def _updateTransactions(self, dict1: Dict) -> List[List]: + """ + remove the items which are not frequent from transactions and updates the transactions with rank of items + + :param dict1 : frequent items with support + :type dict1 : dictionary + :return: list + """ + list1 = [] + for tr in self._Database: + list2 = [int(tr[0])] + for i in range(1, len(tr)): + if tr[i] in dict1: + list2.append(self._rank[tr[i]]) + if len(list2) >= 2: + basket = list2[1:] + basket.sort() + list2[1:] = basket[0:] + list1.append(list2) + return list1 + + def _buildTree(self, data: 
List[List], info: Dict) -> '_Tree': + """ + it takes the transactions and support of each item and construct the main tree with setting root + node as null + + :param data : it represents the one transactions in database + :type data : list + :param info : it represents the support of each item + :type info : dictionary + :return: tree + """ + rootNode = _Tree() + rootNode.info = info.copy() + for i in range(len(data)): + set1 = [] + set1.append(data[i][0]) + rootNode._addTransaction(data[i][1:], set1) + return rootNode + + def _savePeriodic(self, itemset: List) -> str: + """ + To convert the pattern with its original item name + + :param itemset: partial periodic pattern. + :return: pattern with original item name + """ + temp = str() + for i in itemset: + temp = temp + self._rankdup[i] + "\t" + return temp + + def _convert(self, value: Union[int, float, str]) -> Union[int, float]: + """ + To convert the given user specified value + + :param value: user specified value + :return: converted value + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self._Database) * value) + if type(value) is str: + if '.' in value: + value = float(value) + value = (len(self._Database) * value) + else: + value = int(value) + return value + + +
+[docs] + @deprecated("It is recommended to use mine() instead of startMine() for mining process") + def startMine(self) -> None: + """ + Main method where the patterns are mined by constructing tree. + :return: None + """ + + self.mine()
+ + + +
+[docs]
+    def mine(self) -> None:
+        """
+        Main method where the patterns are mined by constructing the tree.
+        :return: None
+
+        """
+        global _minPS, _period, _lno
+        self._startTime = _abstract._time.time()
+        if self._iFile is None:
+            raise Exception("Please enter the file path or file name:")
+        if self._minPS is None:
+            raise Exception("Please enter the Minimum Support")
+        self._creatingItemSets()
+        generatedItems, pfList = self._partialPeriodicOneItem()
+        _minPS, _period, _lno = self._minPS, self._period, len(self._Database)
+        updatedTransactions = self._updateTransactions(generatedItems)
+        for x, y in self._rank.items():
+            self._rankdup[y] = x
+        info = {self._rank[k]: v for k, v in generatedItems.items()}
+        Tree = self._buildTree(updatedTransactions, info)
+        patterns = Tree._generatePatterns([])
+        self._finalPatterns = {}
+        for i in patterns:
+            s = self._savePeriodic(i[0])
+            self._finalPatterns[s] = i[1]
+        self._endTime = _abstract._time.time()
+        process = _abstract._psutil.Process(_abstract._os.getpid())
+        self._memoryUSS = process.memory_full_info().uss
+        self._memoryRSS = process.memory_info().rss
+        print("Partial Periodic Patterns were generated successfully using 3PGrowth algorithm")
+ + + +
+[docs] + def getMemoryUSS(self) -> float: + """Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self) -> float: + """Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self) -> float: + """Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self) -> _abstract._pd.DataFrame: + """Storing final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataFrame = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a.replace('\t', ' '), b]) + dataFrame = _abstract._pd.DataFrame(data, columns=['Patterns', 'periodicSupport']) + return dataFrame
+ + +
+[docs]
+    def save(self, outFile: str) -> None:
+        """Complete set of frequent patterns will be written to an output file
+
+        :param outFile: name of the output file
+        :type outFile: csv file
+        :return: None
+        """
+        self._oFile = outFile
+        with open(self._oFile, 'w+') as writer:
+            for x, y in self._finalPatterns.items():
+                s1 = x.strip() + ":" + str(y)
+                writer.write("%s \n" % s1)
+ + +
+[docs] + def getPatterns(self) -> Dict[str, int]: + """ Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs]
+    def printResults(self) -> None:
+        """
+        This function is used to print the results
+        :return: None
+        """
+        print("Total number of Partial Periodic Patterns:", len(self.getPatterns()))
+        print("Total Memory in USS:", self.getMemoryUSS())
+        print("Total Memory in RSS:", self.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_sys.argv) == 5 or len(_sys.argv) == 6:
+        if len(_sys.argv) == 6:
+            _ap = PPPGrowth(_sys.argv[1], _sys.argv[3], _sys.argv[4], _sys.argv[5])
+        if len(_sys.argv) == 5:
+            _ap = PPPGrowth(_sys.argv[1], _sys.argv[3], _sys.argv[4])
+        _ap.startMine()
+        print("Total number of Partial Periodic Patterns:", len(_ap.getPatterns()))
+        _ap.save(_sys.argv[2])
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS:", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        print("Error! The number of input parameters does not match the total number of parameters provided")
+
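+
+# Note on threshold semantics (added sketch): _convert() interprets minPS and
+# period relative to the database size |DB|. With |DB| = 100 transactions:
+#
+#     minPS = 10     -> 10                  (int: absolute count)
+#     minPS = 0.1    -> 100 * 0.1 = 10.0    (float: proportion of |DB|)
+#     minPS = '10'   -> 10,  minPS = '0.1' -> 10.0   (strings parsed first)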
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/partialPeriodicPattern/basic/PPP_ECLAT.html b/sphinx/_build/html/_modules/PAMI/partialPeriodicPattern/basic/PPP_ECLAT.html new file mode 100644 index 000000000..9cb1ff0d8 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/partialPeriodicPattern/basic/PPP_ECLAT.html @@ -0,0 +1,655 @@ + + + + + + PAMI.partialPeriodicPattern.basic.PPP_ECLAT — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for PAMI.partialPeriodicPattern.basic.PPP_ECLAT

+# PPP_ECLAT (3P-ECLAT) is a fundamental approach for mining partial periodic-frequent patterns.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#             from PAMI.partialPeriodicPattern.basic import PPP_ECLAT as alg
+#
+#             obj = alg.PPP_ECLAT(iFile, minPS, period)
+#
+#             obj.mine()
+#
+#             Patterns = obj.getPatterns()
+#
+#             print("Total number of partial periodic patterns:", len(Patterns))
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+__copyright__ = """
+ Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+
+
+from PAMI.partialPeriodicPattern.basic import abstract as _ab
+from typing import List, Dict, Tuple, Set, Union, Any, Generator
+import pandas as pd
+
+from deprecated import deprecated
+
+
+[docs] +class PPP_ECLAT(_ab._partialPeriodicPatterns): + """ + :Descripition: 3pEclat is the fundamental approach to mine the partial periodic frequent patterns. + + :Reference: R. Uday Kirana,b,∗ , J.N. Venkateshd, Masashi Toyodaa , Masaru Kitsuregawaa,c , P. Krishna Reddy Discovering partial periodic-frequent patterns in a transactional database + https://www.tkl.iis.u-tokyo.ac.jp/new/uploads/publication_file/file/774/JSS_2017.pdf + + :param iFile: str : + Name of the Input file to mine complete set of frequent pattern's + :param oFile: str : + Name of the output file to store complete set of frequent patterns + :param minPS: float: + Minimum partial periodic pattern... + :param period: float: + Minimum partial periodic... + + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + :Attributes: + + self.iFile : file + Name of the Input file or path of the input file + self. oFile : file + Name of the output file or path of the output file + minPS: float or int or str + The user can specify minPS either in count or proportion of database size. + If the program detects the data type of minPS is integer, then it treats minPS is expressed in count. + Otherwise, it will be treated as float. + Example: minPS=10 will be treated as integer, while minPS=10.0 will be treated as float + period: float or int or str + The user can specify period either in count or proportion of database size. + If the program detects the data type of period is integer, then it treats period is expressed in count. + Otherwise, it will be treated as float. + Example: period=10 will be treated as integer, while period=10.0 will be treated as float + sep : str + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space or \t. + However, the users can override their default separator. + memoryUSS : float + To store the total amount of USS memory consumed by the program + memoryRSS : float + To store the total amount of RSS memory consumed by the program + startTime:float + To record the start time of the mining process + endTime:float + To record the completion time of the mining process + Database : list + To store the transactions of a database in list + mapSupport : Dictionary + To maintain the information of item and their frequency + lno : int + it represents the total no of transactions + tree : class + it represents the Tree class + finalPatterns : dict + it represents to store the patterns + tidList : dict + stores the timestamps of an item + + :Methods: + + startMine() + Mining process will start from here + getPatterns() + Complete set of patterns will be retrieved with this function + save(oFile) + Complete set of frequent patterns will be loaded in to an output file + getPatternsAsDataFrame() + Complete set of frequent patterns will be loaded in to a dataframe + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function + creatingOneitemSets() + Scan the database and store the items with their timestamps which are periodic frequent + getPeriodAndSupport() + Calculates the support and period for a list of timestamps. 
+ Generation() + Used to implement prefix class equivalence method to generate the periodic patterns recursively + + **Executing the code on terminal:** + ---------------------------------------- + .. code-block:: console + + + Format: + + (.venv) $ python3 PPP_ECLAT.py <inputFile> <outputFile> <minPS> <period> + + Examples: + + (.venv) $ python3 PPP_ECLAT.py sampleDB.txt patterns.txt 0.3 0.4 + + + **Sample run of importing the code:** + ----------------------------------------- + ... code-block:: python + + from PAMI.periodicFrequentPattern.basic import PPP_ECLAT as alg + + obj = alg.PPP_ECLAT(iFile, minPS,period) + + obj.startMine() + + Patterns = obj.getPatterns() + + print("Total number of partial periodic patterns:", len(Patterns)) + + obj.save(oFile) + + Df = obj.getPatternsAsDataFrame() + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + **Credits:** + ------------------ + The complete program was written by P.RaviKumar under the supervision of Professor Rage Uday Kiran.\n + + """ + + _startTime = float() + _endTime = float() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _sep = " " + _memoryUSS = float() + _memoryRSS = float() + _mapSupport = {} + _itemsetCount = 0 + _writer = None + _minPS = str() + _period = str() + _tidList = {} + _lno = 0 + _Database = [] + + def _convert(self, value) -> Union[int, float]: + """ + To convert the given user specified value + :param value: user specified value + + :return: converted value + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self._Database) * value) + if type(value) is str: + if '.' in value: + value = float(value) + value = (len(self._Database) * value) + else: + value = int(value) + return value + + def _getPeriodicSupport(self, timeStamps: list) -> int: + """ + calculates the support and periodicity with list of timestamps. 
+ + :param timeStamps : timestamps of a pattern + :type timeStamps : list + :return: list + """ + timeStamps.sort() + per = 0 + for i in range(len(timeStamps) - 1): + j = i + 1 + if abs(timeStamps[j] - timeStamps[i]) <= self._period: + per += 1 + return per + + def _creatingItemSets(self) -> None: + """ + Storing the complete transactions of the database/input file in a database variable + :return: None + """ + self._Database = [] + if isinstance(self._iFile, _ab._pd.DataFrame): + data, tids = [], [] + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'TS' in i: + tids = self._iFile['TS'].tolist() + if 'Transactions' in i: + data = self._iFile['Transactions'].tolist() + for i in range(len(data)): + tr = [tids[i][0]] + tr = tr + data[i] + self._Database.append(tr) + self._lno = len(self._Database) + + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line.strip() + self._lno += 1 + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + self._lno += 1 + line.strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + except IOError: + print("File Not Found") + quit() + + def _creatingOneitemSets(self) -> List[str]: + """ + Scans the Temporal database / Input file and stores the 1-length partial-periodic patterns. + :return: list + """ + plist = [] + self._tidList = {} + self._mapSupport = {} + self._period = self._convert(self._period) + for line in self._Database: + s = line + n = int(s[0]) + for i in range(1, len(s)): + si = s[i] + if self._mapSupport.get(si) is None: + self._mapSupport[si] = [0, n] + self._tidList[si] = [n] + else: + lp = n - self._mapSupport[si][1] + if lp <= self._period: + self._mapSupport[si][0] += 1 + self._mapSupport[si][1] = n + self._tidList[si].append(n) + self._minPS = self._convert(self._minPS) + self._mapSupport = {k: v[0] for k, v in self._mapSupport.items() if v[0] >= self._minPS} + plist = [key for key, value in sorted(self._mapSupport.items(), key=lambda x: x[1], reverse=True)] + return plist + + def _save(self, prefix: List[str], suffix: List[str], tidSetX: List[int]) -> None: + """ + saves the patterns that satisfy the partial periodic property. 
+ + :param prefix: the prefix of a pattern + :type prefix: list + :param suffix : the suffix of a patterns + :type suffix : list + :param tidSetX : the timestamp of a patterns + :type tidSetX : list + :return: None + """ + + if prefix is None: + prefix = suffix + else: + prefix = prefix + suffix + val = self._getPeriodicSupport(tidSetX) + if val >= self._minPS: + sample = str() + for i in prefix: + sample = sample + i + "\t" + self._finalPatterns[sample] = val + + def _Generation(self, prefix: List[str], itemSets: List[str], tidSets: List[list]) -> None: + """ + Generates the patterns following Equivalence-class methods + + :param prefix : main equivalence prefix + :type prefix : partial-periodic item or pattern + :param itemSets : patterns which are items combined with prefix and satisfying the periodicity + and partial property with their timestamps + :type itemSets : list + :param tidSets : timestamps of the items in the argument itemSets + :type tidSets : list + :return: None + """ + if len(itemSets) == 1: + i = itemSets[0] + tidi = tidSets[0] + self._save(prefix, [i], tidi) + return + for i in range(len(itemSets)): + itemI = itemSets[i] + if itemI is None: + continue + tidSetX = tidSets[i] + classItemSets = [] + classTidSets = [] + itemSetX = [itemI] + for j in range(i + 1, len(itemSets)): + itemJ = itemSets[j] + tidSetJ = tidSets[j] + y = list(set(tidSetX).intersection(tidSetJ)) + val = self._getPeriodicSupport(y) + if val >= self._minPS: + classItemSets.append(itemJ) + classTidSets.append(y) + newprefix = list(set(itemSetX)) + prefix + self._Generation(newprefix, classItemSets, classTidSets) + self._save(prefix, list(set(itemSetX)), tidSetX) + + +
+[docs] + @deprecated("It is recommended to use mine() instead of startMine() for mining process") + def startMine(self) -> None: + """ + Main program start with extracting the periodic frequent items from the database and + performs prefix equivalence to form the combinations and generates partial-periodic patterns. + :return: None + + """ + self.mine()
+ + +
+[docs]
+    def mine(self) -> None:
+        """
+        Main program starts by extracting the partial-periodic items from the database and
+        performs prefix equivalence to form the combinations and generate partial-periodic patterns.
+        :return: None
+
+        """
+        self._startTime = _ab._time.time()
+        self._creatingItemSets()
+        plist = self._creatingOneitemSets()
+        self._finalPatterns = {}
+        for i in range(len(plist)):
+            itemI = plist[i]
+            tidSetX = self._tidList[itemI]
+            itemSetX = [itemI]
+            itemSets = []
+            tidSets = []
+            for j in range(i + 1, len(plist)):
+                itemJ = plist[j]
+                tidSetJ = self._tidList[itemJ]
+                y1 = list(set(tidSetX).intersection(tidSetJ))
+                val = self._getPeriodicSupport(y1)
+                if val >= self._minPS:
+                    itemSets.append(itemJ)
+                    tidSets.append(y1)
+            self._Generation(itemSetX, itemSets, tidSets)
+            self._save(None, itemSetX, tidSetX)
+        self._endTime = _ab._time.time()
+        process = _ab._psutil.Process(_ab._os.getpid())
+        self._memoryUSS = process.memory_full_info().uss
+        self._memoryRSS = process.memory_info().rss
+        print("Partial Periodic Patterns were generated successfully using 3PEclat algorithm")
+ + +
+[docs] + def getMemoryUSS(self) -> float: + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self) -> float: + """Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self) -> float: + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self) -> _ab._pd.DataFrame: + """Storing final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataframe = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a.replace('\t', ' '), b]) + dataframe = _ab._pd.DataFrame(data, columns=['Patterns', 'periodicSupport']) + return dataframe
+ + +
+[docs]
+    def save(self, outFile: str) -> None:
+        """Complete set of frequent patterns will be written to an output file
+
+        :param outFile: name of the output file
+        :type outFile: file
+        :return: None
+        """
+        self._oFile = outFile
+        with open(self._oFile, 'w+') as writer:
+            for x, y in self._finalPatterns.items():
+                s1 = x.strip() + ":" + str(y)
+                writer.write("%s \n" % s1)
+ + +
+[docs] + def getPatterns(self) -> Dict[str, int]: + """ Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs]
+    def printResults(self) -> None:
+        """
+        This function is used to print the results
+        :return: None
+        """
+        print("Total number of Partial Periodic Patterns:", len(self.getPatterns()))
+        print("Total Memory in USS:", self.getMemoryUSS())
+        print("Total Memory in RSS:", self.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_ab._sys.argv) == 5 or len(_ab._sys.argv) == 6:
+        if len(_ab._sys.argv) == 6:
+            _ap = PPP_ECLAT(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5])
+        if len(_ab._sys.argv) == 5:
+            _ap = PPP_ECLAT(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4])
+        _ap.startMine()
+        print("Total number of Partial Periodic Patterns:", len(_ap.getPatterns()))
+        _ap.save(_ab._sys.argv[2])
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS:", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        print("Error! The number of input parameters does not match the total number of parameters provided")
+
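+
+# Minimal sketch of the tid-list intersection at the heart of 3P-ECLAT
+# (standalone illustration with made-up data, not library code): the periodic
+# support of a 2-pattern is computed on the intersection of its items'
+# timestamp lists.
+#
+#     tidA, tidB, period = [1, 2, 4, 7], [2, 4, 5, 7], 2
+#     shared = sorted(set(tidA) & set(tidB))              # [2, 4, 7]
+#     per = sum(1 for i in range(len(shared) - 1)
+#               if shared[i + 1] - shared[i] <= period)   # gaps 2, 3 -> per = 1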
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/partialPeriodicPattern/closed/PPPClose.html b/sphinx/_build/html/_modules/PAMI/partialPeriodicPattern/closed/PPPClose.html new file mode 100644 index 000000000..3dedfc877 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/partialPeriodicPattern/closed/PPPClose.html @@ -0,0 +1,717 @@ + + + + + + PAMI.partialPeriodicPattern.closed.PPPClose — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for PAMI.partialPeriodicPattern.closed.PPPClose

+
+
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#             from PAMI.partialPeriodicPattern.closed import PPPClose as alg
+#
+#             obj = alg.PPPClose("../basic/sampleTDB.txt", "2", "6")
+#
+#             obj.startMine()
+#
+#             periodicFrequentPatterns = obj.getPatterns()
+#
+#             print("Total number of Frequent Patterns:", len(periodicFrequentPatterns))
+#
+#             obj.save("patterns")
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+#
+#
+
+__copyright__ = """
+ Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+
+
+import sys as _sys
+import validators as _validators
+from urllib.request import urlopen as _urlopen
+from PAMI.partialPeriodicPattern.closed import abstract as _abstract
+import pandas as pd
+from deprecated import deprecated
+
+
+[docs] +class PPPClose(_abstract._partialPeriodicPatterns): + """ + :Description: + + PPPClose algorithm is used to discover the closed partial periodic patterns in temporal databases. + It uses depth-first search. + + :Reference: R. Uday Kiran1 , J. N. Venkatesh2 , Philippe Fournier-Viger3 , Masashi Toyoda1 , P. Krishna Reddy2 and Masaru Kitsuregawa + https://www.tkl.iis.u-tokyo.ac.jp/new/uploads/publication_file/file/799/PAKDD.pdf + + :param iFile: str : + Name of the Input file to mine complete set of periodic frequent pattern's + :param oFile: str : + Name of the output file to store complete set of periodic frequent pattern's + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + + :param iFile: str : + Name of the Input file to mine complete set of frequent pattern's + :param oFile: str : + Name of the output file to store complete set of frequent patterns + :param period: float: + Minimum partial periodic... + :param periodicSupport: float: + Minimum partial periodic... + + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + :Attributes: + + iFile : str + Input file name or path of the input file + oFile : str + Name of the output file or path of the input file + periodicSupport: int or float or str + The user can specify periodicSupport either in count or proportion of database size. + If the program detects the data type of periodicSupport is integer, then it treats periodicSupport is expressed in count. + Otherwise, it will be treated as float. + Example: periodicSupport=10 will be treated as integer, while periodicSupport=10.0 will be treated as float + period: int or float or str + The user can specify period either in count or proportion of database size. + If the program detects the data type of period is integer, then it treats period is expressed in count. + Otherwise, it will be treated as float. + Example: period=10 will be treated as integer, while period=10.0 will be treated as float + sep : str + This variable is used to distinguish items from one another in a transaction. The default separator is tab space or \t. + However, the users can override their default separator. 
+ startTime:float + To record the start time of the mining process + endTime:float + To record the completion time of the mining process + finalPatterns: dict + Storing the complete set of patterns in a dictionary variable + memoryUSS : float + To store the total amount of USS memory consumed by the program + memoryRSS : float + To store the total amount of RSS memory consumed by the program + + :Methods: + + startMine() + Mining process will start from here + getPatterns() + Complete set of patterns will be retrieved with this function + save(oFile) + Complete set of frequent patterns will be loaded in to a output file + getPatternsAsDataFrame() + Complete set of frequent patterns will be loaded in to a dataframe + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function + + **Executing the code on terminal:** + ------------------------------------- + .. code-block:: console + + + Format: + + (.venv) $ python3 PPPClose.py <inputFile> <outputFile> <periodicSupport> <period> + + Examples: + + (.venv) $ python3 PPPClose.py sampleTDB.txt patterns.txt 0.3 0.4 + + + **Sample run of the imported code:** + -------------------------------------- + .. code-block:: python + + from PAMI.partialPeriodicPattern.closed import PPPClose as alg + + obj = alg.PPPClose("../basic/sampleTDB.txt", "2", "6") + + obj.startMine() + + periodicFrequentPatterns = obj.getPatterns() + + print("Total number of Frequent Patterns:", len(periodicFrequentPatterns)) + + obj.save("patterns") + + Df = obj.getPatternsAsDataFrame() + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + **Credits:** + -------------- + The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.\n + + """ + + _periodicSupport = float() + _period = float() + _startTime = float() + _endTime = float() + _finalPatterns = {} + _Database = [] + _iFile = " " + _oFile = " " + _sep = " " + _memoryUSS = float() + _memoryRSS = float() + _transaction = [] + _hashing = {} + _mapSupport = {} + _itemSetCount = 0 + _maxItemId = 0 + _tableSize = 10000 + _tidList = {} + _lno = 0 + + def _convert(self, value): + """ + To convert the given user specified value + + :param value: user specified value + :return: converted value + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (self._lno * value) + if type(value) is str: + if '.' 
in value: + value = float(value) + value = (self._lno * value) + else: + value = int(value) + return value + + def _creatingItemSets(self): + """ + Storing the complete transactions of the database/input file in a database variable + """ + self._Database = [] + if isinstance(self._iFile, _abstract._pd.DataFrame): + timeStamp, data = [], [] + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'TS' in i: + timeStamp = self._iFile['TS'].tolist() + if 'Transactions' in i: + data = self._iFile['Transactions'].tolist() + for i in range(len(data)): + tr = [timeStamp[i]] + tr = tr + data[i] + self._Database.append(tr) + self._lno = len(self._Database) + if isinstance(self._iFile, str): + if _validators.url(self._iFile): + data = _urlopen(self._iFile) + for line in data: + self._lno += 1 + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + self._lno += 1 + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + except IOError: + print("File Not Found") + quit() + + def _OneLengthPartialItems(self): + """ + To scan the database and extracts the 1-length periodic-frequent items + + :return: Returns the 1-length periodic-frequent items + """ + self._mapSupport = {} + self._tidList = {} + self._period = self._convert(self._period) + for line in self._Database: + n = int(line[0]) + for i in range(1, len(line)): + si = line[i] + if self._mapSupport.get(si) is None: + self._mapSupport[si] = [1, 0, n] + self._tidList[si] = [n] + else: + self._mapSupport[si][0] += 1 + period = abs(n - self._mapSupport[si][2]) + if period <= self._period: + self._mapSupport[si][1] += 1 + self._mapSupport[si][2] = n + self._tidList[si].append(n) + for x, y in self._mapSupport.items(): + period = abs(self._lno - self._mapSupport[x][2]) + if period <= self._period: + self._mapSupport[x][1] += 1 + self._periodicSupport = self._convert(self._periodicSupport) + self._mapSupport = {k: v[1] for k, v in self._mapSupport.items() if v[1] >= self._periodicSupport} + periodicFrequentItems = {} + self._tidList = {k: v for k, v in self._tidList.items() if k in self._mapSupport} + for x, y in self._tidList.items(): + t1 = 0 + for i in y: + t1 += i + periodicFrequentItems[x] = t1 + periodicFrequentItems = [key for key, value in sorted(periodicFrequentItems.items(), key=lambda x: x[1])] + return periodicFrequentItems + + def _calculate(self, tidSet): + """ + To calculate the weight if pattern based on the respective timeStamps + + :param tidSet: timeStamps of the pattern + :return: the calculated weight of the timeStamps + """ + hashcode = 0 + for i in tidSet: + hashcode += i + if hashcode < 0: + hashcode = abs(0 - hashcode) + return hashcode % self._tableSize + + def _contains(self, itemSet, val, hashcode): + """ + To check if the key(hashcode) is in dictionary(hashing) variable + + :param itemSet: generated periodic-frequent itemSet + :param val: support and period of itemSet + :param hashcode: the key generated in calculate() method for every itemSet + + :return: true if itemSet with same support present in dictionary(hashing) or else returns false + """ + if self._hashing.get(hashcode) is None: + return False + for i in self._hashing[hashcode]: + itemSetX = i + if val == self._hashing[hashcode][itemSetX] and set(itemSetX).issuperset(itemSet): + return True + 
return False + + def _getPeriodicSupport(self, timeStamps): + """ + Calculates the period and support of timeStamps + + :param: timeStamps: timeStamps of itemSet + :return: period and support + """ + timeStamps.sort() + sup = 0 + for j in range(len(timeStamps) - 1): + per = abs(timeStamps[j + 1] - timeStamps[j]) + if per <= self._period: + sup += 1 + return sup + + def _save(self, prefix, suffix, tidSetX): + """ + Saves the generated pattern which satisfies the closed property + + :param prefix: the prefix part of itemSet + :param suffix: the suffix part of itemSet + :param tidSetX: the timeStamps of the generated itemSet + :return: saves the closed periodic-frequent pattern + """ + if prefix is None: + prefix = suffix + else: + prefix = prefix + suffix + prefix = list(set(prefix)) + prefix.sort() + val = self._getPeriodicSupport(tidSetX) + if val >= self._periodicSupport: + hashcode = self._calculate(tidSetX) + if self._contains(prefix, val, hashcode) is False: + self._itemSetCount += 1 + sample = str() + for i in prefix: + sample = sample + i + "\t" + self._finalPatterns[sample] = val + if hashcode not in self._hashing: + self._hashing[hashcode] = {tuple(prefix): val} + else: + self._hashing[hashcode][tuple(prefix)] = val + + def _processEquivalenceClass(self, prefix, itemSets, tidSets): + """ + + :param prefix: Prefix class of an itemSet + :param itemSets: suffix items in periodicFrequentItems that satisfies the periodicSupport condition + :param tidSets: timeStamps of items in itemSets respectively + :return: closed periodic patterns with length more than 2 + """ + if len(itemSets) == 1: + i = itemSets[0] + tidList = tidSets[0] + self._save(prefix, [i], tidList) + return + if len(itemSets) == 2: + itemI = itemSets[0] + tidSetI = tidSets[0] + itemJ = itemSets[1] + tidSetJ = tidSets[1] + y1 = list(set(tidSetI).intersection(tidSetJ)) + if len(y1) >= self._periodicSupport: + suffix = [] + suffix += [itemI, itemJ] + suffix = list(set(suffix)) + self._save(prefix, suffix, y1) + if len(y1) != len(tidSetI): + self._save(prefix, [itemI], tidSetI) + if len(y1) != len(tidSetJ): + self._save(prefix, [itemJ], tidSetJ) + return + for i in range(len(itemSets)): + itemX = itemSets[i] + if itemX is None: + continue + tidSetX = tidSets[i] + classItemSets = [] + classTidSets = [] + itemSetX = [itemX] + for j in range(i + 1, len(itemSets)): + itemJ = itemSets[j] + if itemJ is None: + continue + tidSetJ = tidSets[j] + y = list(set(tidSetX).intersection(tidSetJ)) + if len(y) < self._periodicSupport: + continue + if len(tidSetX) == len(tidSetJ) and len(y) == len(tidSetX): + itemSets.insert(j, None) + tidSets.insert(j, None) + itemSetX.append(itemJ) + elif len(tidSetX) < len(tidSetJ) and len(y) == len(tidSetX): + itemSetX.append(itemJ) + elif len(tidSetX) > len(tidSetJ) and len(y) == len(tidSetJ): + itemSets.insert(j, None) + tidSets.insert(j, None) + classItemSets.append(itemJ) + classTidSets.append(y) + else: + classItemSets.append(itemJ) + classTidSets.append(y) + if len(classItemSets) > 0: + newPrefix = list(set(itemSetX)) + prefix + self._processEquivalenceClass(newPrefix, classItemSets, classTidSets) + self._save(prefix, list(set(itemSetX)), tidSetX) + +
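+    # Illustrative walk-through (ours, not part of the original source): _save
+    # keeps a candidate only when _contains finds no already-hashed superset
+    # with the same periodic-support. For example, once {'a', 'b'} is stored
+    # with val = 4 under the hashcode of its tidSet, a later {'a'} sharing the
+    # same tidSet (hence the same val and hashcode) is skipped, so only the
+    # closed pattern survives.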
+[docs] + @deprecated("It is recommended to use mine() instead of startMine() for mining process") + def startMine(self): + """ + Mining process will start from here + """ + self.mine()
+ + +
+[docs]
+    def mine(self):
+        """
+        Mining process will start from here
+        """
+        self._startTime = _abstract._time.time()
+        self._creatingItemSets()
+        self._hashing = {}
+        self._finalPatterns = {}
+        periodicFrequentItems = self._OneLengthPartialItems()
+        for i in range(len(periodicFrequentItems)):
+            itemX = periodicFrequentItems[i]
+            if itemX is None:
+                continue
+            tidSetX = self._tidList[itemX]
+            itemSetX = [itemX]
+            itemSets = []
+            tidSets = []
+            for j in range(i + 1, len(periodicFrequentItems)):
+                itemJ = periodicFrequentItems[j]
+                if itemJ is None:
+                    continue
+                tidSetJ = self._tidList[itemJ]
+                y1 = list(set(tidSetX).intersection(tidSetJ))
+                if len(y1) < self._periodicSupport:
+                    continue
+                # '==' is used for the length comparisons; 'is' only checks
+                # object identity and is unreliable for integers
+                if len(tidSetX) == len(tidSetJ) and len(y1) == len(tidSetX):
+                    periodicFrequentItems.insert(j, None)
+                    itemSetX.append(itemJ)
+                elif len(tidSetX) < len(tidSetJ) and len(y1) == len(tidSetX):
+                    itemSetX.append(itemJ)
+                elif len(tidSetX) > len(tidSetJ) and len(y1) == len(tidSetJ):
+                    periodicFrequentItems.insert(j, None)
+                    itemSets.append(itemJ)
+                    tidSets.append(y1)
+                else:
+                    itemSets.append(itemJ)
+                    tidSets.append(y1)
+            if len(itemSets) > 0:
+                self._processEquivalenceClass(itemSetX, itemSets, tidSets)
+            self._save([], itemSetX, tidSetX)
+        self._endTime = _abstract._time.time()
+        process = _abstract._psutil.Process(_abstract._os.getpid())
+        self._memoryUSS = process.memory_full_info().uss
+        self._memoryRSS = process.memory_info().rss
+        print("Closed periodic frequent patterns were generated successfully using PPPClose algorithm")
+ + +
+[docs] + def getMemoryUSS(self): + """Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self): + """Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self): + """Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self): + """Storing final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataFrame = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a.replace('\t', ' '), b]) + dataFrame = _abstract._pd.DataFrame(data, columns=['Patterns', 'periodicSupport']) + return dataFrame
+ + +
+[docs]
+    def save(self, outFile):
+        """Complete set of frequent patterns will be written to an output file
+
+        :param outFile: name of the output file
+        :type outFile: file
+        """
+        self._oFile = outFile
+        # the with-statement ensures the file is closed after writing
+        with open(self._oFile, 'w+') as writer:
+            for x, y in self._finalPatterns.items():
+                s1 = x.strip() + ":" + str(y)
+                writer.write("%s \n" % s1)
+ + +
+[docs] + def getPatterns(self): + """ Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs]
+    def printResults(self):
+        """
+        This function is used to print the results
+        """
+        print("Total number of Closed Partial Periodic Patterns:", len(self.getPatterns()))
+        print("Total Memory in USS:", self.getMemoryUSS())
+        print("Total Memory in RSS:", self.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", self.getRuntime())
+
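+# A minimal standalone sketch (ours, not part of PAMI) of the periodic-support
+# measure used by PPPClose: it counts gaps between consecutive timestamps that
+# do not exceed the user-given period. All names below are illustrative.
+def _examplePeriodicSupport(timeStamps, period):
+    """Count consecutive-timestamp gaps that are <= period."""
+    timeStamps = sorted(timeStamps)
+    return sum(1 for a, b in zip(timeStamps, timeStamps[1:]) if b - a <= period)
+
+# _examplePeriodicSupport([1, 3, 7, 8, 10], period=2) returns 3, because the
+# consecutive gaps are 2, 4, 1, 2 and three of them are within the period of 2.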
+
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_sys.argv) == 5 or len(_sys.argv) == 6:
+        if len(_sys.argv) == 6:
+            _ap = PPPClose(_sys.argv[1], _sys.argv[3], _sys.argv[4], _sys.argv[5])
+        if len(_sys.argv) == 5:
+            _ap = PPPClose(_sys.argv[1], _sys.argv[3], _sys.argv[4])
+        _ap.startMine()
+        print("Total number of Patterns:", len(_ap.getPatterns()))
+        _ap.save(_sys.argv[2])
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS:", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        print("Error! The number of input parameters does not match the total number of parameters provided")
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/partialPeriodicPattern/topk/abstract.html b/sphinx/_build/html/_modules/PAMI/partialPeriodicPattern/topk/abstract.html new file mode 100644 index 000000000..1521ea8e2 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/partialPeriodicPattern/topk/abstract.html @@ -0,0 +1,289 @@ + + + + + + PAMI.partialPeriodicPattern.topk.abstract — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for PAMI.partialPeriodicPattern.topk.abstract

+#  Copyright (C)  2021 Rage Uday Kiran
+#
+#      This program is free software: you can redistribute it and/or modify
+#      it under the terms of the GNU General Public License as published by
+#      the Free Software Foundation, either version 3 of the License, or
+#      (at your option) any later version.
+#
+#      This program is distributed in the hope that it will be useful,
+#      but WITHOUT ANY WARRANTY; without even the implied warranty of
+#      MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#      GNU General Public License for more details.
+#
+#      You should have received a copy of the GNU General Public License
+#      along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+from abc import ABC, abstractmethod
+import time as _time
+import math as _math
+import csv as _csv
+import pandas as _pd
+from collections import defaultdict as _defaultdict
+from itertools import combinations as _combinations
+import os as _os
+import os.path as _path
+import psutil as _psutil
+import sys as _sys
+import validators as _validators
+from urllib.request import urlopen as _urlopen
+
+
+
+[docs] +class partialPeriodicPatterns(ABC): + """ + :Description: This abstract base class defines the variables and methods that every periodic-frequent pattern mining algorithm must employ in PAMI + + :Attributes: + + iFile : str + Input file name or path of the input file + k: int or float or str + The user can specify minPS either in count or proportion of database size. + If the program detects the data type of minPS is integer, then it treats minPS is expressed in count. + Otherwise, it will be treated as float. + Example: minPS=10 will be treated as integer, while minPS=10.0 will be treated as float + period: int or float or str + The user can specify period either in count or proportion of database size. + If the program detects the data type of period is integer, then it treats period is expressed in count. + Otherwise, it will be treated as float. + Example: period=10 will be treated as integer, while period=10.0 will be treated as float + sep : str + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space or \t. + However, the users can override their default separator. + startTime:float + To record the start time of the algorithm + endTime:float + To record the completion time of the algorithm + finalPatterns: dict + Storing the complete set of patterns in a dictionary variable + oFile : str + Name of the output file to store complete set of periodic-frequent patterns + memoryUSS : float + To store the total amount of USS memory consumed by the program + memoryRSS : float + To store the total amount of RSS memory consumed by the program + + :Methods: + + startMine() + Mining process will start from here + getPatterns() + Complete set of patterns will be retrieved with this function + save(oFile) + Complete set of periodic-frequent patterns will be loaded in to a output file + getPatternsAsDataFrame() + Complete set of periodic-frequent patterns will be loaded in to data frame + getMemoryUSS() + Total amount of USS memory consumed by the program will be retrieved from this function + getMemoryRSS() + Total amount of RSS memory consumed by the program will be retrieved from this function + getRuntime() + Total amount of runtime taken by the program will be retrieved from this function + """ + + def __init__(self, iFile, k, period, sep = '\t'): + """ + :param iFile: Input file name or path of the input file + :type iFile: str + :param k: The user can specify minPS either in count or proportion of database size. + If the program detects the data type of minPS is integer, then it treats k is expressed in count. + Otherwise, it will be treated as float. + Example: k=10 will be treated as integer, while minPS=10.0 will be treated as float + :type k: int or float or str + :param period: The user can specify period either in count or proportion of database size. + If the program detects the data type of period is integer, then it treats period is expressed in count. + Otherwise, it will be treated as float. + Example: period=10 will be treated as integer, while period=10.0 will be treated as float + :type period: int or float or str + :param sep: separator used in user specified input file + :type sep: str + """ + + self._iFile = iFile + self._k = k + self._period = period + self._sep = sep + self._oFile = str() + self._finalPatterns = {} + self._memoryUSS = float() + self._memoryRSS = float() + self._startTime = float() + self._endTime = float() + + +
+[docs] + @abstractmethod + def startMine(self): + """Code for the mining process will start from this function""" + + pass
+ + +
+[docs] + @abstractmethod + def getPatterns(self): + """Complete set of periodic-frequent patterns generated will be retrieved from this function""" + + pass
+ + +
+[docs] + @abstractmethod + def save(self, oFile): + """Complete set of periodic-frequent patterns will be saved in to an output file from this function + + :param oFile: Name of the output file + :type oFile: file + """ + + pass
+ + +
+[docs] + @abstractmethod + def getPatternsAsDataFrame(self): + """Complete set of periodic-frequent patterns will be loaded in to data frame from this function""" + + pass
+ + +
+[docs] + @abstractmethod + def getMemoryUSS(self): + """Total amount of USS memory consumed by the program will be retrieved from this function""" + + pass
+ + +
+[docs] + @abstractmethod + def getMemoryRSS(self): + """Total amount of RSS memory consumed by the program will be retrieved from this function""" + pass
+ + +
+[docs] + @abstractmethod + def getRuntime(self): + """Total amount of runtime taken by the program will be retrieved from this function""" + + pass
+ + +
+[docs] + @abstractmethod + def printResults(self): + """ To print all the results of execution""" + pass
+
+ +
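+# Illustrative sketch (ours, not part of PAMI): the smallest concrete subclass
+# that satisfies this abstract interface. Every abstract method must be
+# overridden before instantiation; the trivial bodies below are placeholders.
+class _ExampleMiner(partialPeriodicPatterns):
+    def startMine(self):
+        self._startTime = _time.time()
+        self._finalPatterns = {}   # a real miner would populate this
+        self._endTime = _time.time()
+    def getPatterns(self):
+        return self._finalPatterns
+    def save(self, oFile):
+        with open(oFile, 'w') as f:
+            for pattern, value in self._finalPatterns.items():
+                f.write("%s:%s\n" % (pattern, value))
+    def getPatternsAsDataFrame(self):
+        return _pd.DataFrame(list(self._finalPatterns.items()), columns=['Patterns', 'periodicSupport'])
+    def getMemoryUSS(self):
+        return self._memoryUSS
+    def getMemoryRSS(self):
+        return self._memoryRSS
+    def getRuntime(self):
+        return self._endTime - self._startTime
+    def printResults(self):
+        print("Total number of patterns:", len(self.getPatterns()))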
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/partialPeriodicPattern/topk/k3PMiner.html b/sphinx/_build/html/_modules/PAMI/partialPeriodicPattern/topk/k3PMiner.html new file mode 100644 index 000000000..75ff745fb --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/partialPeriodicPattern/topk/k3PMiner.html @@ -0,0 +1,655 @@ + + + + + + PAMI.partialPeriodicPattern.topk.k3PMiner — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for PAMI.partialPeriodicPattern.topk.k3PMiner

+# k3PMiner is an algorithm to discover top-k partial periodic patterns in a temporal database.
+#
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+
+#
+#             import PAMI.partialPeriodicPattern.topk.k3PMiner as alg
+#
+#             obj = alg.k3PMiner(iFile, k, periodicity)
+#
+#             obj.startMine()
+#
+#             partialPeriodicPatterns = obj.getPatterns()
+#
+#             print("Total number of top partial periodic Patterns:", len(partialPeriodicPatterns))
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternInDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+
+
+
+__copyright__ = """
+ Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+from PAMI.partialPeriodicPattern.topk import abstract as _abstract
+import validators as _validators
+from urllib.request import urlopen as _urlopen
+import sys as _sys
+import pandas as pd
+from deprecated import deprecated
+
+
+[docs] +class k3PMiner(_abstract.partialPeriodicPatterns): + """ + :Description: k3PMiner is and algorithm to discover top - k partial periodic patterns in a temporal database. + + :Reference: Palla Likhitha,Rage Uday Kiran, Discovering Top-K Partial Periodic Patterns in Big Temporal Databases https://dl.acm.org/doi/10.1007/978-3-031-39847-6_28 + + :param iFile: str : + Name of the Input file to mine complete set of periodic frequent pattern's + :param oFile: str : + Name of the output file to store complete set of periodic frequent pattern's + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + + :param iFile: str : + Name of the Input file to mine complete set of frequent pattern's + :param oFile: str : + Name of the output file to store complete set of frequent patterns + :param period: str: + Minimum partial periodic... + + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + :Attributes: + + iFile : str + Input file name or path of the input file + k: int + User specified count of top partial periodic patterns + sep : str + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space or \t. + However, the users can override their default separator. + oFile : str + Name of the output file or the path of the output file + startTime:float + To record the start time of the mining process + endTime:float + To record the completion time of the mining process + finalPatterns: dict + Storing the complete set of patterns in a dictionary variable + memoryUSS : float + To store the total amount of USS memory consumed by the program + memoryRSS : float + To store the total amount of RSS memory consumed by the program + + :Methods: + + startMine() + Mining process will start from here + getPatterns() + Complete set of patterns will be retrieved with this function + save(oFile) + Complete set of frequent patterns will be loaded in to a output file + getPatternsAsDataFrame() + Complete set of frequent patterns will be loaded in to a dataframe + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function + creatingItemSets() + Scans the dataset or dataframes and stores in list format + frequentOneItem() + Generates one frequent patterns + eclatGeneration(candidateList) + It will generate the combinations of frequent items + generateFrequentPatterns(tidList) + It will generate the combinations of frequent items from a list of items + + **Executing the code on terminal:** + ------------------------------------- + .. code-block:: console + + + Format: + + python3 k3PMiner.py <iFile> <oFile> <k> <period> + + Examples: + + python3 k3PMiner.py sampleDB.txt patterns.txt 10 3 + + + **Sample run of the importing code:** + -------------------------------------- + ... 
code-block:: python + + import PAMI.partialPeriodicPattern.topk.k3PMiner as alg + + obj = alg.Topk_PPPGrowth(iFile, k, period) + + obj.startMine() + + partialPeriodicPatterns = obj.getPatterns() + + print("Total number of top partial periodic Patterns:", len(partialPeriodicPatterns)) + + obj.save(oFile) + + Df = obj.getPatternInDataFrame() + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + **Credits:** + --------------- + The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran. + + """ + + _startTime = float() + _endTime = float() + _k = int() + _period = " " + _finalPatterns = {} + _iFile = " " + _oFile = " " + _sep = " " + _memoryUSS = float() + _memoryRSS = float() + _Database = [] + _tidList = {} + _lno = int() + _minimum = int() + _mapSupport = {} + + def _creatingItemSets(self): + """ + Storing the complete transactions of the database/input file in a database variable + """ + self._Database = [] + if isinstance(self._iFile, _abstract._pd.DataFrame): + timeStamp, data = [], [] + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'TS' in i: + timeStamp = self._iFile['TS'].tolist() + if 'Transactions' in i: + data = self._iFile['Transactions'].tolist() + for i in range(len(data)): + tr = [timeStamp[i]] + tr = tr + data[i] + self._Database.append(tr) + self._lno = len(self._Database) + # print(self.Database) + if isinstance(self._iFile, str): + if _validators.url(self._iFile): + data = _urlopen(self._iFile) + for line in data: + self._lno += 1 + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + self._lno += 1 + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + except IOError: + print("File Not Found") + quit() + + def _convert(self, value): + """ + To convert the given user specified value + + :param value: user specified value + :return: converted value + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self._Database) * value) + if type(value) is str: + if '.' 
in value: + value = float(value) + value = (len(self._Database) * value) + else: + value = int(value) + return value + + def _frequentOneItem(self): + """ + Generating one frequent patterns + """ + + self._mapSupport = {} + self._tidList = {} + self._period = self._convert(self._period) + self._k = int(self._convert(self._k)) + for line in self._Database: + n = int(line[0]) + for i in range(1, len(line)): + si = line[i] + if self._mapSupport.get(si) is None: + self._mapSupport[si] = [1, 0, n] + self._tidList[si] = [n] + else: + self._mapSupport[si][0] += 1 + period = abs(n - self._mapSupport[si][2]) + if period <= self._period: + self._mapSupport[si][1] += 1 + self._mapSupport[si][2] = n + self._tidList[si].append(n) + for x, y in self._mapSupport.items(): + period = abs(self._lno - self._mapSupport[x][2]) + if period <= self._period: + self._mapSupport[x][1] += 1 + self._mapSupport = {k: v[1] for k, v in self._mapSupport.items()} + #print(self._mapSupport) + plist = [key for key, value in sorted(self._mapSupport.items(), key=lambda x: x[1], reverse=True)] + #print(plist) + self._finalPatterns = {} + for i in plist: + if self._mapSupport[i] == 0: + continue + if len(self._finalPatterns) >= self._k: + break + else: + self._finalPatterns[i] = self._mapSupport[i] + #print(len(self._finalPatterns), self._k, self._periodicity) + #print(self._finalPatterns) + self._minimum = min([self._finalPatterns[i] for i in self._finalPatterns.keys()]) + #print(self._minimum) + plist = list(self._finalPatterns.keys()) + return plist + + def _getSupportAndPeriod(self, timeStamps): + """To calculate the periodicity and support + + :param timeStamps: Timestamps of an item set + :return: support, periodicity + """ + + timeStamps.sort() + sup = 0 + for j in range(len(timeStamps) - 1): + per = abs(timeStamps[j + 1] - timeStamps[j]) + if per <= self._period: + sup += 1 + return sup + + def _save(self, prefix, suffix, tidSetI): + """Saves the patterns that satisfy the periodic frequent property. + + :param prefix: the prefix of a pattern + :type prefix: list + :param suffix: the suffix of a patterns + :type suffix: list + :param tidSetI: the timestamp of a patterns + :type tidSetI: list + """ + + if prefix is None: + prefix = suffix + else: + prefix = prefix + suffix + #print(prefix) + #print(self._minimum) + val = self._getSupportAndPeriod(tidSetI) + sample = str() + for i in prefix: + sample = sample + i + "\t" + if len(self._finalPatterns) < self._k: + if val > self._minimum: + self._finalPatterns[sample] = val + self._finalPatterns = {k: v for k, v in + sorted(self._finalPatterns.items(), key=lambda item: item[1], reverse=True)} + self._minimum = min([self._finalPatterns[i] for i in self._finalPatterns.keys()]) + #print(self._finalPatterns) + else: + #print(prefix, val) + for x, y in sorted(self._finalPatterns.items(), key=lambda x: x[1]): + if val > y: + #print("yes") + del self._finalPatterns[x] + self._finalPatterns[sample] = val + self._finalPatterns = {k: v for k, v in + sorted(self._finalPatterns.items(), key=lambda item: item[1], reverse=True)} + self._minimum = min([self._finalPatterns[i] for i in self._finalPatterns.keys()]) + #print(self._finalPatterns) + return + + def _Generation(self, prefix, itemSets, tidSets): + """Equivalence class is followed and checks for the patterns generated for periodic-frequent patterns. 
+ + :param prefix: main equivalence prefix + :type prefix: periodic-frequent item or pattern + :param itemSets: patterns which are items combined with prefix and satisfying the periodicity + and frequent with their timestamps + :type itemSets: list + :param tidSets: timestamps of the items in the argument itemSets + :type tidSets: list + """ + if len(itemSets) == 1: + i = itemSets[0] + tidI = tidSets[0] + self._save(prefix, [i], tidI) + return + for i in range(len(itemSets)): + itemI = itemSets[i] + if itemI is None: + continue + tidSetI = tidSets[i] + classItemSets = [] + classTidSets = [] + itemSetX = [itemI] + for j in range(i + 1, len(itemSets)): + itemJ = itemSets[j] + tidSetJ = tidSets[j] + y = list(set(tidSetI).intersection(tidSetJ)) + val = self._getSupportAndPeriod(y) + if val > self._minimum: + classItemSets.append(itemJ) + classTidSets.append(y) + newPrefix = list(set(itemSetX)) + prefix + self._Generation(newPrefix, classItemSets, classTidSets) + self._save(prefix, list(set(itemSetX)), tidSetI) + +
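+    # Illustrative walk-through (ours, not part of the original source): with
+    # k = 2, if candidates arrive as ('a', 5), ('b', 3), ('c', 4), _save first
+    # fills the result set with 'a' and 'b', then evicts the weakest entry 'b'
+    # in favour of 'c' and raises _minimum from 3 to 4, so every later
+    # candidate must exceed 4 to enter the top-k set.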
+[docs] + @deprecated("It is recommended to use mine() instead of startMine() for mining process") + def startMine(self): + """ + Main function of the program + + """ + self.mine()
+ + +
+[docs] + def mine(self): + """ + Main function of the program + + """ + self._startTime = _abstract._time.time() + if self._iFile is None: + raise Exception("Please enter the file path or file name:") + if self._k is None: + raise Exception("Please enter the Minimum Support") + self._creatingItemSets() + plist = self._frequentOneItem() + for i in range(len(plist)): + itemI = plist[i] + tidSetI = self._tidList[itemI] + itemSetX = [itemI] + itemSets = [] + tidSets = [] + for j in range(i + 1, len(plist)): + itemJ = plist[j] + tidSetJ = self._tidList[itemJ] + y1 = list(set(tidSetI).intersection(tidSetJ)) + val = self._getSupportAndPeriod(y1) + if val > self._minimum: + itemSets.append(itemJ) + tidSets.append(y1) + self._Generation(itemSetX, itemSets, tidSets) + print("TopK partial periodic patterns were generated successfully") + self._endTime = _abstract._time.time() + process = _abstract._psutil.Process(_abstract._os.getpid()) + self._memoryUSS = float() + self._memoryRSS = float() + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss
+ + +
+[docs] + def getMemoryUSS(self): + """Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self): + """Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self): + """Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self): + """Storing final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataFrame = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a.replace('\t', ' '), b]) + dataFrame = _abstract._pd.DataFrame(data, columns=['Patterns', 'Support']) + return dataFrame
+ + +
+[docs]
+    def save(self, outFile):
+        """Complete set of frequent patterns will be written to an output file
+
+        :param outFile: name of the output file
+        :type outFile: file
+        """
+        self._oFile = outFile
+        # the with-statement ensures the file is closed after writing
+        with open(self._oFile, 'w+') as writer:
+            for x, y in self._finalPatterns.items():
+                patternsAndSupport = x.strip() + ":" + str(y)
+                writer.write("%s \n" % patternsAndSupport)
+ + +
+[docs] + def getPatterns(self): + """ Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs]
+    def printResults(self):
+        """ This function is used to print the results
+        """
+        print("Top K Partial Periodic Patterns:", len(self.getPatterns()))
+        print("Total Memory in USS:", self.getMemoryUSS())
+        print("Total Memory in RSS:", self.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", self.getRuntime())
+
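+# A standalone sketch (ours, not part of PAMI) of the same top-k bookkeeping
+# expressed with a min-heap: the heap root always holds the weakest of the k
+# best candidates, mirroring the role of _minimum above.
+import heapq
+
+def _exampleTopK(candidates, k):
+    """Keep the k (pattern, score) pairs with the highest scores."""
+    heap = []   # min-heap of (score, pattern); heap[0] is the weakest kept pair
+    for pattern, score in candidates:
+        if len(heap) < k:
+            heapq.heappush(heap, (score, pattern))
+        elif score > heap[0][0]:
+            heapq.heapreplace(heap, (score, pattern))
+    return {pattern: score for score, pattern in heap}
+
+# _exampleTopK([('a', 5), ('b', 3), ('c', 4)], k=2) returns {'c': 4, 'a': 5}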
+
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_sys.argv) == 5 or len(_sys.argv) == 6:
+        if len(_sys.argv) == 6:
+            _ap = k3PMiner(_sys.argv[1], _sys.argv[3], _sys.argv[4], _sys.argv[5])
+        if len(_sys.argv) == 5:
+            _ap = k3PMiner(_sys.argv[1], _sys.argv[3], _sys.argv[4])
+        _ap.startMine()
+        print("Top K Partial Periodic Patterns:", len(_ap.getPatterns()))
+        _ap.save(_sys.argv[2])
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS:", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        print("Error! The number of input parameters does not match the total number of parameters provided")
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/partialPeriodicPatternInMultipleTimeSeries/PPGrowth.html b/sphinx/_build/html/_modules/PAMI/partialPeriodicPatternInMultipleTimeSeries/PPGrowth.html new file mode 100644 index 000000000..40fac98f9 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/partialPeriodicPatternInMultipleTimeSeries/PPGrowth.html @@ -0,0 +1,916 @@ + + + + + + PAMI.partialPeriodicPatternInMultipleTimeSeries.PPGrowth — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for PAMI.partialPeriodicPatternInMultipleTimeSeries.PPGrowth

+# PPGrowth is one of the fundamental algorithms to discover periodic-frequent patterns in a transactional database.
+#
+# **Importing this algorithm into a python program**
+#
+#     from PAMI.periodicFrequentPattern.basic import PPGrowth as alg
+#
+#     obj = alg.PPGrowth(iFile, minSup, maxPer)
+#
+#     obj.startMine()
+#
+#     periodicFrequentPatterns = obj.getPatterns()
+#
+#     print("Total number of Periodic Frequent Patterns:", len(periodicFrequentPatterns))
+#
+#     obj.save(oFile)
+#
+#     Df = obj.getPatternsAsDataFrame()
+#
+#     memUSS = obj.getMemoryUSS()
+#
+#     print("Total Memory in USS:", memUSS)
+#
+#     memRSS = obj.getMemoryRSS()
+#
+#     print("Total Memory in RSS", memRSS)
+#
+#     run = obj.getRuntime()
+#
+#     print("Total ExecutionTime in seconds:", run)
+
+
+__copyright__ = """
+ Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+     
+
+"""
+
+import pandas as pd
+from deprecated import deprecated
+from PAMI.partialPeriodicPatternInMultipleTimeSeries import abstract as _ab
+
+
+
+_lno = int()
+_periodicSupport = float()
+_period = float()
+
+class _Node(object):
+    """
+    A class used to represent the node of frequentPatternTree
+
+    :Attributes:
+
+        item : int or None
+            Storing item of a node
+        timeStamps : list
+            To maintain the timestamps of a database at the end of the branch
+        parent : node
+            To maintain the parent of every node
+        children : list
+            To maintain the children of a node
+
+    :Methods:
+
+        addChild(itemName)
+            Storing the children to their respective parent nodes
+        """
+
+    def __init__(self, item, children):
+        """
+        Initializing the Node class
+
+        :param item: Storing the item of a node
+        :type item: int or None
+        :param children: To maintain the children of a node
+        :type children: dict
+        """
+
+        self.item = item
+        self.children = children
+        self.parent = None
+        self.timeStamps = []
+
+    def addChild(self, node):
+        """
+        To add the children to a node
+
+        :param node: parent node in the tree
+        """
+
+        self.children[node.item] = node
+        node.parent = self
+
+
+class _Tree(object):
+    """
+    A class used to represent the frequentPatternGrowth tree structure
+
+
+    :Attributes:
+
+        root : Node
+            Represents the root node of the tree
+        summaries : dictionary
+            Storing the nodes with same item name
+        info : dictionary
+            Stores the support of the items
+
+
+    :Methods:
+
+        addTransactions(Database)
+            Creating transaction as a branch in frequentPatternTree
+        getConditionalPatterns(Node)
+            Generates the conditional patterns from tree for specific node
+        conditionalTransaction(prefixPaths,Support)
+            Takes the prefixPath of a node and support at child of the path and extract the frequent patterns from
+            prefixPaths and generates prefixPaths with items which are frequent
+        remove(Node)
+            Removes the node from tree once after generating all the patterns respective to the node
+        generatePatterns(Node)
+            Starts from the root node of the tree and mines the periodic-frequent patterns
+
+        """
+
+    def __init__(self):
+        self.root = _Node(None, {})
+        self.summaries = {}
+        self.info = {}
+
+    def addTransaction(self, transaction, tid):
+        """
+        Adding a transaction into tree
+
+            :param transaction: To represent the complete database
+            :type transaction: list
+            :param tid: To represent the timestamp of a database
+            :type tid: list
+            :return: pfp-growth tree
+        """
+
+        currentNode = self.root
+        for i in range(len(transaction)):
+            if transaction[i] not in currentNode.children:
+                newNode = _Node(transaction[i], {})
+                currentNode.addChild(newNode)
+                if transaction[i] in self.summaries:
+                    self.summaries[transaction[i]].append(newNode)
+                else:
+                    self.summaries[transaction[i]] = [newNode]
+                currentNode = newNode
+            else:
+                currentNode = currentNode.children[transaction[i]]
+        currentNode.timeStamps = currentNode.timeStamps + tid
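+        # Illustrative example (ours): adding ['a', 'b'] with tid [1] and then
+        # ['a', 'c'] with tid [2] yields root -> a -> {b, c}; the shared prefix
+        # 'a' is stored once, while the leaves 'b' and 'c' hold the timestamp
+        # lists [1] and [2] at the ends of their branches.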
+
+    def getConditionalPatterns(self, alpha):
+        """
+        Generates all the conditional patterns of a respective node
+
+        :param alpha: To represent a Node in the tree
+        :type alpha: Node
+        :return: A tuple consisting of finalPatterns, conditional pattern base and information
+        """
+        finalPatterns = []
+        finalSets = []
+        for i in self.summaries[alpha]:
+            set1 = i.timeStamps
+            set2 = []
+            while i.parent.item is not None:
+                set2.append(i.parent.item)
+                i = i.parent
+            if len(set2) > 0:
+                set2.reverse()
+                finalPatterns.append(set2)
+                finalSets.append(set1)
+        finalPatterns, finalSets, info = self.conditionalDatabases(finalPatterns, finalSets)
+        return finalPatterns, finalSets, info
+
+    @staticmethod
+    def generateTimeStamps(node):
+        """
+        To get the timestamps of a node
+
+        :param node: A node in the tree
+        :return: Timestamps of a node
+        """
+
+        finalTimeStamps = node.timeStamps
+        return finalTimeStamps
+
+    def removeNode(self, nodeValue):
+        """
+        Removing the node from tree
+
+        :param nodeValue: To represent a node in the tree
+        :type nodeValue: node
+        :return: Tree with their nodes updated with timestamps
+        """
+
+        for i in self.summaries[nodeValue]:
+            i.parent.timeStamps = i.parent.timeStamps + i.timeStamps
+            del i.parent.children[nodeValue]
+
+    def getTimeStamps(self, alpha):
+        """
+        To get all the timestamps of the nodes which share same item name
+
+        :param alpha: Node in a tree
+        :return: Timestamps of a  node
+        """
+        temporary = []
+        for i in self.summaries[alpha]:
+            temporary += i.timeStamps
+        return temporary
+
+    @staticmethod
+    def getSupportAndPeriod(timeStamps):
+        """
+        To calculate the periodicity and support
+
+        :param timeStamps: Timestamps of an item set
+        :return: support, periodicity
+        """
+
+        global _maxPer, _lno,_period,_periodicSupport
+        timeStamps.sort()
+        cur = 0
+        per = list()
+        sup = 0
+        for j in range(len(timeStamps)):
+            timedif=timeStamps[j] - cur
+            per.append(timedif)
+            cur = timeStamps[j]
+            if(_period>=timedif):
+                sup += 1
+        per.append(_lno - cur)
+        if len(per) == 0:
+            return [0, 0]
+        return [sup, max(per)]
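+        # Worked example (ours): with _period = 2 and _lno = 10, the timestamps
+        # [1, 3, 7, 8] produce the gaps 1, 2, 4, 1 plus a trailing gap of
+        # 10 - 8 = 2, so sup = 3 (the gaps 1, 2 and 1 are within _period) and
+        # the returned maximum period is 4.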
+
+    def conditionalDatabases(self, conditionalPatterns, conditionalTimeStamps):
+        """
+        It generates the conditional patterns with periodic-frequent items
+
+        :param conditionalPatterns: conditionalPatterns generated from conditionPattern method of a respective node
+        :type conditionalPatterns: list
+        :param conditionalTimeStamps: Represents the timestamps of a conditional patterns of a node
+        :type conditionalTimeStamps: list
+        :returns: Returns conditional transactions by removing non-periodic and non-frequent items
+        """
+
+        global _periodicSupport,_period
+        pat = []
+        timeStamps = []
+        data1 = {}
+        for i in range(len(conditionalPatterns)):
+            for j in conditionalPatterns[i]:
+                if j in data1:
+                    data1[j] = data1[j] + conditionalTimeStamps[i]
+                else:
+                    data1[j] = conditionalTimeStamps[i]
+        updatedDictionary = {}
+        for m in data1:
+            updatedDictionary[m] = self.getSupportAndPeriod(data1[m])
+        updatedDictionary = {k: v for k, v in updatedDictionary.items() if v[0] >= _periodicSupport and v[1] <= _period}
+        count = 0
+        for p in conditionalPatterns:
+            p1 = [v for v in p if v in updatedDictionary]
+            trans = sorted(p1, key=lambda x: (updatedDictionary.get(x)[0], -x), reverse=True)
+            if len(trans) > 0:
+                pat.append(trans)
+                timeStamps.append(conditionalTimeStamps[count])
+            count += 1
+        return pat, timeStamps, updatedDictionary
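+        # Illustrative example (ours): if only 'x' and 'z' of a conditional
+        # pattern ['x', 'y', 'z'] survive the periodic-support and period
+        # filters in updatedDictionary, the pattern is re-emitted as the pruned
+        # transaction ['x', 'z'], reordered by descending periodic support.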
+
+    def generatePatterns(self, prefix):
+        """
+        Generates the patterns
+
+        :param prefix: Forms the combination of items
+        :type prefix: list
+        :returns: yields patterns with their support and periodicity
+        """
+
+        for i in sorted(self.summaries, key=lambda x: (self.info.get(x)[0], -x)):
+            pattern = prefix[:]
+            pattern.append(i)
+            yield pattern, self.info[i]
+            patterns, timeStamps, info = self.getConditionalPatterns(i)
+            conditionalTree = _Tree()
+            conditionalTree.info = info.copy()
+            for pat in range(len(patterns)):
+                conditionalTree.addTransaction(patterns[pat], timeStamps[pat])
+            if len(patterns) > 0:
+                for q in conditionalTree.generatePatterns(pattern):
+                    yield q
+            self.removeNode(i)
+
+
+
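+# A small sketch (ours, not part of PAMI) exercising the tree above. It only
+# touches addTransaction and the node structure; the module-level thresholds
+# _period, _periodicSupport and _lno are left untouched.
+def _exampleBuildTree():
+    tree = _Tree()
+    tree.addTransaction(['a', 'b'], [1])
+    tree.addTransaction(['a', 'c'], [4])
+    # 'a' heads both branches, so summaries['a'] holds a single shared node
+    assert len(tree.summaries['a']) == 1
+    # timestamps accumulate only at the last node of each inserted branch
+    assert tree.summaries['b'][0].timeStamps == [1]
+    assert tree.summaries['c'][0].timeStamps == [4]
+    return tree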
+[docs] +class PPGrowth(_ab._partialPeriodicPatterns): + """ + About this algorithm + ==================== + + :Description: PPGrowth is one of the fundamental algorithm to discover periodic-frequent patterns in a transactional database. + + :Reference: C. Saideep, R. Uday Kiran, K. Zettsu, P. Fournier-Viger, M. Kitsuregawa and P. Krishna Reddy, + "Discovering Periodic Patterns in Irregular Time Series," 2019 International Conference on Data Mining Workshops (ICDMW), 2019, + pp. 1020-1028, doi: 10.1109/ICDMW.2019.00147. + + :param iFile: str : + Name of the Input file to mine complete set of periodic frequent pattern's + :param oFile: str : + Name of the output file to store complete set of periodic frequent pattern's + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + + :Attributes: + + iFile : file + Name of the Input file or path of the input file + oFile : file + Name of the output file or path of the output file + minSup: int or float or str + The user can specify minSup either in count or proportion of database size. + If the program detects the data type of minSup is integer, then it treats minSup is expressed in count. + Otherwise, it will be treated as float. + Example: minSup=10 will be treated as integer, while minSup=10.0 will be treated as float + maxPer: int or float or str + The user can specify maxPer either in count or proportion of database size. + If the program detects the data type of maxPer is integer, then it treats maxPer is expressed in count. + Otherwise, it will be treated as float. + Example: maxPer=10 will be treated as integer, while maxPer=10.0 will be treated as float + sep : str + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space or \t. + However, the users can override their default separator. 
+ memoryUSS : float + To store the total amount of USS memory consumed by the program + memoryRSS : float + To store the total amount of RSS memory consumed by the program + startTime:float + To record the start time of the mining process + endTime:float + To record the completion time of the mining process + Database : list + To store the transactions of a database in list + mapSupport : Dictionary + To maintain the information of item and their frequency + lno : int + To represent the total no of transaction + tree : class + To represents the Tree class + itemSetCount : int + To represents the total no of patterns + finalPatterns : dict + To store the complete patterns + + :Methods: + + startMine() + Mining process will start from here + getPatterns() + Complete set of patterns will be retrieved with this function + save(oFile) + Complete set of periodic-frequent patterns will be loaded in to a output file + getPatternsAsDataFrame() + Complete set of periodic-frequent patterns will be loaded in to a dataframe + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function + creatingItemSets(fileName) + Scans the dataset and stores in a list format + PeriodicFrequentOneItem() + Extracts the one-periodic-frequent patterns from database + updateDatabases() + Update the database by removing aperiodic items and sort the Database by item decreased support + buildTree() + After updating the Database, remaining items will be added into the tree by setting root node as null + convert() + to convert the user specified value + + Execution methods + ================= + + + **Terminal command** + + + .. code-block:: console + + Format: + + (.venv) $ python3 PPGrowth.py <inputFile> <outputFile> <minSup> <maxPer> + + Examples: + + (.venv) $ python3 PPGrowth.py sampleTDB.txt patterns.txt 0.3 0.4 + + **Sample run of importing the code:** + ---------------------------------------- + + from PAMI.periodicFrequentPattern.basic import PPGrowth as alg + + obj = alg.PPGrowth(iFile, minSup, maxPer) + + obj.startMine() + + periodicFrequentPatterns = obj.getPatterns() + + print("Total number of Periodic Frequent Patterns:", len(periodicFrequentPatterns)) + + obj.save(oFile) + + Df = obj.getPatternsAsDataFrame() + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + **Credits:** + -------------- + + + The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran. 
+ + """ + _startTime = float() + _endTime = float() + _periodicSupport = str() + _period = float() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _sep = " " + _memoryUSS = float() + _memoryRSS = float() + _Database = [] + _rank = {} + _rankedUp = {} + _lno = 0 + + def _creatingItemSets(self): + """ + Storing the complete transactions of the database/input file in a database variable + """ + self._Database = [] + if isinstance(self._iFile, _ab._pd.DataFrame): + data, ts = [], [] + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'TS' in i: + ts = self._iFile['TS'].tolist() + if 'Transactions' in i: + data = self._iFile['Transactions'].tolist() + for i in range(len(data)): + tr = [ts[i][0]] + tr = tr + data[i] + self._Database.append(tr) + + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line.strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + except IOError: + print("File Not Found") + quit() + + def _periodicFrequentOneItem(self): + """ + Calculates the support of each item in the database and assign ranks to the items + by decreasing support and returns the frequent items list + + :returns: return the one-length periodic frequent patterns + """ + global _periodicSupport,_period + data = {} + for tr in self._Database: + for i in range(1, len(tr)): + if tr[i] not in data: + data[tr[i]] = [int(tr[0]), int(tr[0]), 1] + else: + data[tr[i]][0] = max(data[tr[i]][0], (int(tr[0]) - data[tr[i]][1])) + data[tr[i]][1] = int(tr[0]) + if _period>=int(tr[0]) - data[tr[i]][1]: + data[tr[i]][2] += 1 + for key in data: + data[key][0] = max(data[key][0], abs(len(self._Database) - data[key][1])) + data = {k: [v[2], v[0]] for k, v in data.items() if v[2] >= _periodicSupport} + pfList = [k for k, v in sorted(data.items(), key=lambda x: (x[1][0], x[0]), reverse=True)] + self._rank = dict([(index, item) for (item, index) in enumerate(pfList)]) + return data, pfList + + def _updateDatabases(self, dict1): + """ + Remove the items which are not frequent from database and updates the database with rank of items + + :param dict1: frequent items with support + :type dict1: dictionary + :return: Sorted and updated transactions + """ + list1 = [] + for tr in self._Database: + list2 = [int(tr[0])] + for i in range(1, len(tr)): + if tr[i] in dict1: + list2.append(self._rank[tr[i]]) + if len(list2) >= 2: + basket = list2[1:] + basket.sort() + list2[1:] = basket[0:] + list1.append(list2) + return list1 + + @staticmethod + def _buildTree(data, info): + """ + It takes the database and support of an each item and construct the main tree by setting root node as a null + + :param data: it represents the one Databases in database + :type data: list + :param info: it represents the support of each item + :type info: dictionary + :return: returns root node of tree + """ + + rootNode = _Tree() + rootNode.info = info.copy() + for i in range(len(data)): + set1 = [data[i][0]] + rootNode.addTransaction(data[i][1:], set1) + return rootNode + + def _savePeriodic(self, itemSet,change): + """ + To convert the ranks of items in to their original item names + + :param itemSet: frequent pattern + 
:return: frequent pattern with original item names + """ + t1 = str() + for i in itemSet: + t1 = str(t1) + change[(self._rankedUp[i])] + "\t" + return t1 + + def _convert(self, value): + """ + To convert the given user specified value + + :param value: user specified value + :return: converted value + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self._Database) * value) + if type(value) is str: + if '.' in value: + value = float(value) + value = (len(self._Database) * value) + else: + value = int(value) + return value + + def _convertNumber(self): + """ + To convert the given number + """ + changeDic={} + rechangeDic={} + newDatabase=[] + count=0 + for i in self._Database: + line=[int(i[0])] + for j in i[1:]: + if j not in changeDic: + changeDic[j]=count + rechangeDic[count]=j + line.append(count) + count=count+1 + else: + line.append(changeDic[j]) + newDatabase.append(line) + self._Database=newDatabase + return rechangeDic + +
+[docs] + @deprecated("It is recommended to use mine() instead of startMine() for mining process") + def startMine(self): + """ + Mining process will start from this function + """ + + global _minSup, _maxPer, _lno,_period,_periodicSupport + self._startTime = _ab._time.time() + if self._iFile is None: + raise Exception("Please enter the file path or file name:") + if self._periodicSupport is None: + raise Exception("Please enter the Periodic Support") + self._creatingItemSets() + changeDic = self._convertNumber() + self._periodicSupport = self._convert(self._periodicSupport) + self._period = self._convert(self._period) + _periodicSupport, _period, _lno = self._periodicSupport, self._period, len(self._Database) + if self._periodicSupport > len(self._Database): + raise Exception("Please enter the minSup in range between 0 to 1") + + generatedItems, pfList = self._periodicFrequentOneItem() + updatedDatabases = self._updateDatabases(generatedItems) + self._rankedUp={y:x for x, y in self._rank.items()} + info = {self._rank[k]: v for k, v in generatedItems.items()} + Tree = self._buildTree(updatedDatabases, info) + patterns = Tree.generatePatterns([]) + + self._finalPatterns = {} + self._finalPatterns={self._savePeriodic(i[0],changeDic):i[1]for i in patterns} + self._endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = float() + self._memoryRSS = float() + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss + print("Periodic Frequent patterns were generated successfully using PPGrowth algorithm ")
+ + +
+[docs] + def Mine(self): + """ + Mining process will start from this function + """ + + global _minSup, _maxPer, _lno,_period,_periodicSupport + self._startTime = _ab._time.time() + if self._iFile is None: + raise Exception("Please enter the file path or file name:") + if self._periodicSupport is None: + raise Exception("Please enter the Periodic Support") + self._creatingItemSets() + changeDic = self._convertNumber() + self._periodicSupport = self._convert(self._periodicSupport) + self._period = self._convert(self._period) + _periodicSupport, _period, _lno = self._periodicSupport, self._period, len(self._Database) + if self._periodicSupport > len(self._Database): + raise Exception("Please enter the minSup in range between 0 to 1") + + generatedItems, pfList = self._periodicFrequentOneItem() + updatedDatabases = self._updateDatabases(generatedItems) + self._rankedUp={y:x for x, y in self._rank.items()} + info = {self._rank[k]: v for k, v in generatedItems.items()} + Tree = self._buildTree(updatedDatabases, info) + patterns = Tree.generatePatterns([]) + + self._finalPatterns = {} + self._finalPatterns={self._savePeriodic(i[0],changeDic):i[1]for i in patterns} + self._endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = float() + self._memoryRSS = float() + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss + print("Periodic Frequent patterns were generated successfully using PPGrowth algorithm ")
+ + +
+[docs] + def getMemoryUSS(self): + """Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self): + """Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self): + """Calculating the total amount of runtime taken by the mining process + + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self): + """Storing final periodic-frequent patterns in a dataframe + + :return: returning periodic-frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataFrame = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a.replace('\t', ' '), b[0], b[1]]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support', 'Periodicity']) + return dataFrame
+ + +
+[docs]
+    def save(self, outFile):
+        """
+        Complete set of periodic-frequent patterns will be written into an output file
+
+        :param outFile: name of the output file
+        :type outFile: file
+        """
+        self._oFile = outFile
+        writer = open(self._oFile, 'w+')
+        for x, y in self._finalPatterns.items():
+            s1 = x.strip() + ":" + str(y[0]) + ":" + str(y[1])
+            writer.write("%s \n" % s1)
+        writer.close()
+ + +
+[docs] + def getPatterns(self): + """ Function to send the set of periodic-frequent patterns after completion of the mining process + + :return: returning periodic-frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs]
+    def printResults(self):
+        """
+        This function is used to print the results
+        """
+        print("Total number of Partial Periodic Patterns:", len(self.getPatterns()))
+        print("Total Memory in USS:", self.getMemoryUSS())
+        print("Total Memory in RSS:", self.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", self.getRuntime())
+
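+# A hedged usage sketch (not part of the original source; the input file name and
+# threshold values below are placeholders) following the calling convention of the
+# __main__ block beneath: PPGrowth(iFile, periodicSupport, period[, sep]).
+#
+#     obj = PPGrowth("sampleTDB.txt", 0.3, 0.4)
+#     obj.Mine()                      # startMine() is deprecated in favour of Mine()
+#     print("Total number of Partial Periodic Patterns:", len(obj.getPatterns()))
+#     obj.save("patterns.txt")
+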
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_ab._sys.argv) == 5 or len(_ab._sys.argv) == 6:
+        if len(_ab._sys.argv) == 6:
+            _ap = PPGrowth(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5])
+        if len(_ab._sys.argv) == 5:
+            _ap = PPGrowth(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4])
+        _ap.startMine()
+        print("Total number of Patterns:", len(_ap.getPatterns()))
+        _ap.save(_ab._sys.argv[2])
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS:", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        print("Error! The number of input parameters does not match the total number of parameters provided")
+
\ No newline at end of file
diff --git a/sphinx/_build/html/_modules/PAMI/periodicCorrelatedPattern/basic/EPCPGrowth.html b/sphinx/_build/html/_modules/PAMI/periodicCorrelatedPattern/basic/EPCPGrowth.html
new file mode 100644
index 000000000..1f164579c
--- /dev/null
+++ b/sphinx/_build/html/_modules/PAMI/periodicCorrelatedPattern/basic/EPCPGrowth.html
@@ -0,0 +1,869 @@
+PAMI.periodicCorrelatedPattern.basic.EPCPGrowth — PAMI 2024.04.23 documentation

Source code for PAMI.periodicCorrelatedPattern.basic.EPCPGrowth

+# EPCPGrowth is an algorithm to discover periodic-correlated patterns in a temporal database.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#
+#     from PAMI.periodicCorrelatedPattern.basic import EPCPGrowth as alg
+#
+#     obj = alg.EPCPGrowth(iFile, minSup, minAllConf, maxPer, maxPerAllConf)
+#
+#     obj.startMine()
+#
+#     periodicCorrelatedPatterns = obj.getPatterns()
+#
+#     print("Total number of Periodic Frequent Patterns:", len(periodicCorrelatedPatterns))
+#
+#     obj.save(oFile)
+#
+#     Df = obj.getPatternsAsDataFrame()
+#
+#     memUSS = obj.getMemoryUSS()
+#
+#     print("Total Memory in USS:", memUSS)
+#
+#     memRSS = obj.getMemoryRSS()
+#
+#     print("Total Memory in RSS", memRSS)
+#
+#     run = obj.getRuntime()
+#
+#     print("Total ExecutionTime in seconds:", run)
+#
+
+
+__copyright__ = """
+ Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+
+
+from PAMI.periodicCorrelatedPattern.basic import abstract as _ab
+from typing import List, Dict, Tuple, Set, Union, Any, Generator
+import pandas as pd
+
+_maxPer = float()
+_minAllConf = float()
+_minSup = float()
+_maxPerAllConf = float()
+_frequentList = {}
+_lno = int()
+
+
+class _Node(object):
+    """
+    A class used to represent the node of frequentPatternTree
+
+    :Attributes:
+
+        item : int or None
+            Storing item of a node
+        timeStamps : list
+            To maintain the timestamps of a database at the end of the branch
+        parent : node
+            To maintain the parent of every node
+        children : list
+            To maintain the children of a node
+
+    :Methods:
+
+        addChild(itemName)
+            Storing the children to their respective parent nodes
+        """
+
+    def __init__(self, item, children) -> None:
+        """
+        Initializing the Node class
+
+        :param item: Storing the item of a node
+        :type item: int or None
+        :param children: To maintain the children of a node
+        :type children: dict
+        """
+
+        self.item = item
+        self.children = children
+        self.parent = None
+        self.timeStamps = []
+
+    def addChild(self, node) -> None:
+        """
+        To add the children to a node
+
+        :param node: child node to be attached to this node
+        """
+
+        self.children[node.item] = node
+        node.parent = self
+
+
+class _Tree(object):
+    """
+    A class used to represent the frequentPatternGrowth tree structure
+
+    :Attributes:
+
+        root : Node
+            Represents the root node of the tree
+        summaries : dictionary
+            Storing the nodes with same item name
+        info : dictionary
+            Stores the support of the items
+
+
+    :Methods:
+
+        addTransactions(Database)
+            Creating transaction as a branch in frequentPatternTree
+        getConditionalPatterns(Node)
+            Generates the conditional patterns from tree for specific node
+        conditionalTransaction(prefixPaths,Support)
+            Takes the prefixPath of a node and support at child of the path and extract the frequent patterns from
+            prefixPaths and generates prefixPaths with items which are frequent
+        remove(Node)
+            Removes the node from tree once after generating all the patterns respective to the node
+        generatePatterns(Node)
+            Starts from the root node of the tree and mines the periodic-frequent patterns
+
+    """
+
+    def __init__(self) -> None:
+        self.root = _Node(None, {})
+        self.summaries = {}
+        self.info = {}
+
+    def addTransaction(self, transaction, tid) -> None:
+        """
+        Adding a transaction into tree
+
+        :param transaction: To represent the complete database
+        :type transaction: list
+        :param tid: To represent the timestamp of a database
+        :type tid: list
+        """
+
+        currentNode = self.root
+        for i in range(len(transaction)):
+            if transaction[i] not in currentNode.children:
+                newNode = _Node(transaction[i], {})
+                currentNode.addChild(newNode)
+                if transaction[i] in self.summaries:
+                    self.summaries[transaction[i]].append(newNode)
+                else:
+                    self.summaries[transaction[i]] = [newNode]
+                currentNode = newNode
+            else:
+                currentNode = currentNode.children[transaction[i]]
+        currentNode.timeStamps = currentNode.timeStamps + tid
+
+    def getConditionalPatterns(self, alpha, pattern) -> tuple:
+        """
+        Generates all the conditional patterns of a respective node
+
+        :param alpha: To represent a Node in the tree
+        :type alpha: Node
+        :param pattern: prefix pattern for which the conditional patterns are generated
+        :type pattern: list
+        :return: A tuple consisting of finalPatterns, conditional pattern base and information
+        """
+        finalPatterns = []
+        finalSets = []
+        for i in self.summaries[alpha]:
+            set1 = i.timeStamps
+            set2 = []
+            while i.parent.item is not None:
+                set2.append(i.parent.item)
+                i = i.parent
+            if len(set2) > 0:
+                set2.reverse()
+                finalPatterns.append(set2)
+                finalSets.append(set1)
+        finalPatterns, finalSets, info = self.conditionalDatabases(finalPatterns, finalSets, pattern)
+        return finalPatterns, finalSets, info
+
+    @staticmethod
+    def generateTimeStamps(node) -> list:
+        """
+        To get the timestamps of a node
+
+        :param node: A node in the tree
+        :return: Timestamps of a node
+        """
+
+        finalTimeStamps = node.timeStamps
+        return finalTimeStamps
+
+    def removeNode(self, nodeValue) -> None:
+        """
+        Removing the node from tree
+
+        :param nodeValue: To represent a node in the tree
+        :type nodeValue: node
+        :return: Tree with their nodes updated with timestamps
+        """
+
+        for i in self.summaries[nodeValue]:
+            i.parent.timeStamps = i.parent.timeStamps + i.timeStamps
+            del i.parent.children[nodeValue]
+
+    def getTimeStamps(self, alpha) -> list:
+        """
+        To get all the timestamps of the nodes which share same item name
+
+        :param alpha: Node in a tree
+        :return: Timestamps of a  node
+        """
+        temporary = []
+        for i in self.summaries[alpha]:
+            temporary += i.timeStamps
+        return temporary
+
+    @staticmethod
+    def getSupportAndPeriod(timeStamps, pattern) -> list:
+        """
+        To calculate the periodicity and support
+
+        :param timeStamps: Timestamps of an item set
+        :param pattern: pattern whose confidence measures are being computed
+        :return: [support, periodicity, all-confidence, max-periodicity-all-confidence]
+        """
+
+        global _minSup, _minAllConf, _maxPer, _maxPerAllConf, _frequentList, _lno
+        timeStamps.sort()
+        cur = 0
+        per = list()
+        sup = 0
+        for j in range(len(timeStamps)):
+            per.append(timeStamps[j] - cur)
+            cur = timeStamps[j]
+            sup += 1
+        per.append(_lno - cur)
+        if len(per) == 0:
+            return [0, 0, 0, 0]
+        l = []
+        for i in pattern:
+            l.append(_frequentList[i][0])
+        l1 = []
+        for i in pattern:
+            l1.append(_frequentList[i][1])
+        conf = sup/max(l)
+        perConf = max(per)/min(l1)
+        #print(pattern, timeStamps, l, l1, sup, max(per), conf, perConf)
+        return [sup, max(per), conf, perConf]
+
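+    # A worked illustration (not part of the original source) of the computation in
+    # getSupportAndPeriod above, assuming _lno = 10 and timeStamps = [2, 4, 7]:
+    #
+    #     gaps = [2 - 0, 4 - 2, 7 - 4, 10 - 7] = [2, 2, 3, 3]
+    #     sup = 3 (number of timestamps), periodicity = max(gaps) = 3
+    #
+    # conf is then sup divided by the largest single-item support in the pattern,
+    # and perConf is the periodicity divided by the smallest single-item
+    # periodicity, both looked up from _frequentList.
+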
+    def conditionalDatabases(self, conditionalPatterns: list, conditionalTimeStamps: list, pattern) -> tuple:
+        """
+        It generates the conditional patterns with periodic-frequent items
+
+        :param conditionalPatterns: conditionalPatterns generated from conditionPattern method of a respective node
+        :type conditionalPatterns: list
+        :param conditionalTimeStamps: Represents the timestamps of a conditional patterns of a node
+        :type conditionalTimeStamps: list
+        :param pattern: prefix pattern being extended
+        :type pattern: list
+        :returns: Returns conditional transactions by removing non-periodic and non-frequent items
+        """
+        global _maxPer, _minSup
+        temp = pattern
+        pat = []
+        timeStamps = []
+        data1 = {}
+        for i in range(len(conditionalPatterns)):
+            for j in conditionalPatterns[i]:
+                if j in data1:
+                    data1[j] = data1[j] + conditionalTimeStamps[i]
+                else:
+                    data1[j] = conditionalTimeStamps[i]
+        updatedDictionary = {}
+        for m in data1:
+            updatedDictionary[m] = self.getSupportAndPeriod(data1[m], pattern + [m])
+        updatedDictionary = {k: v for k, v in updatedDictionary.items() if v[0] >= _minSup and v[1] <= _maxPer}
+        count = 0
+        for p in conditionalPatterns:
+            p1 = [v for v in p if v in updatedDictionary]
+            trans = sorted(p1, key=lambda x: (updatedDictionary.get(x)[0], -x), reverse=True)
+            if len(trans) > 0:
+                pat.append(trans)
+                timeStamps.append(conditionalTimeStamps[count])
+            count += 1
+        return pat, timeStamps, updatedDictionary
+
+    def generatePatterns(self, prefix: list) -> Generator:
+        """
+        Generates the patterns
+
+        :param prefix: Forms the combination of items
+        :type prefix: list
+        :returns: yields patterns with their support and periodicity
+        """
+        global _minSup, _minAllConf, _maxPer, _maxPerAllConf
+        for i in sorted(self.summaries, key=lambda x: (self.info.get(x)[0], -x)):
+            pattern = prefix[:]
+            pattern.append(i)
+            #print(pattern, self.info[i][0], self.info[i][1], self.info[i][2], self.info[i][3])
+            if self.info[i][0] >= _minSup and self.info[i][1] <= _maxPer and self.info[i][2] >= _minAllConf and self.info[i][3] <= _maxPerAllConf:
+                yield pattern, self.info[i]
+                patterns, timeStamps, info = self.getConditionalPatterns(i, pattern)
+                conditionalTree = _Tree()
+                conditionalTree.info = info.copy()
+                for pat in range(len(patterns)):
+                    conditionalTree.addTransaction(patterns[pat], timeStamps[pat])
+                if len(patterns) > 0:
+                    for q in conditionalTree.generatePatterns(pattern):
+                        yield q
+            self.removeNode(i)
+
+
+
+[docs]
+class EPCPGrowth(_ab._periodicCorrelatedPatterns):
+    """
+    :Description: EPCPGrowth is an algorithm to discover periodic-correlated patterns in a temporal database.
+
+    :Reference: http://www.tkl.iis.u-tokyo.ac.jp/new/uploads/publication_file/file/897/Venkatesh2018_Chapter_DiscoveringPeriodic-Correlated.pdf
+
+    :Attributes:
+
+        iFile : file
+            Name of the Input file or path of the input file
+        oFile : file
+            Name of the output file or path of the output file
+        minSup : int or float or str
+            The user can specify minSup either in count or proportion of database size.
+            If the program detects the data type of minSup is integer, then it treats minSup as expressed in count.
+            Otherwise, it will be treated as float.
+            Example: minSup=10 will be treated as integer, while minSup=10.0 will be treated as float
+        minAllConf : int or float or str
+            The user can specify minAllConf either in count or proportion of database size.
+            If the program detects the data type of minAllConf is integer, then it treats minAllConf as expressed in count.
+            Otherwise, it will be treated as float.
+            Example: minAllConf=10 will be treated as integer, while minAllConf=10.0 will be treated as float
+        maxPer : int or float or str
+            The user can specify maxPer either in count or proportion of database size.
+            If the program detects the data type of maxPer is integer, then it treats maxPer as expressed in count.
+            Otherwise, it will be treated as float.
+            Example: maxPer=10 will be treated as integer, while maxPer=10.0 will be treated as float
+        maxPerAllConf : int or float or str
+            The user can specify maxPerAllConf either in count or proportion of database size.
+            If the program detects the data type of maxPerAllConf is integer, then it treats maxPerAllConf as expressed in count.
+            Otherwise, it will be treated as float.
+            Example: maxPerAllConf=10 will be treated as integer, while maxPerAllConf=10.0 will be treated as float
+        sep : str
+            This variable is used to distinguish items from one another in a transaction. The default separator is tab space or \t.
+            However, the users can override their default separator.
+        memoryUSS : float
+            To store the total amount of USS memory consumed by the program
+        memoryRSS : float
+            To store the total amount of RSS memory consumed by the program
+        startTime : float
+            To record the start time of the mining process
+        endTime : float
+            To record the completion time of the mining process
+        Database : list
+            To store the transactions of a database in list
+        mapSupport : Dictionary
+            To maintain the information of item and their frequency
+        lno : int
+            To represent the total no of transactions
+        tree : class
+            To represent the Tree class
+        itemSetCount : int
+            To represent the total no of patterns
+        finalPatterns : dict
+            To store the complete patterns
+
+    :Methods:
+
+        startMine()
+            Mining process will start from here
+        getPatterns()
+            Complete set of patterns will be retrieved with this function
+        save(oFile)
+            Complete set of periodic-frequent patterns will be loaded into an output file
+        getPatternsAsDataFrame()
+            Complete set of periodic-frequent patterns will be loaded into a dataframe
+        getMemoryUSS()
+            Total amount of USS memory consumed by the mining process will be retrieved from this function
+        getMemoryRSS()
+            Total amount of RSS memory consumed by the mining process will be retrieved from this function
+        getRuntime()
+            Total amount of runtime taken by the mining process will be retrieved from this function
+        creatingItemSets(fileName)
+            Scans the dataset and stores it in a list format
+        PeriodicFrequentOneItem()
+            Extracts the one-periodic-frequent patterns from database
+        updateDatabases()
+            Updates the database by removing aperiodic items and sorts the Database by item decreasing support
+        buildTree()
+            After updating the Database, remaining items will be added into the tree by setting root node as null
+        convert()
+            To convert the user specified value
+
+    **Executing the code on terminal:**
+    ---------------------------------------
+    Format:
+        >>> python3 EPCPGrowth.py <inputFile> <outputFile> <minSup> <minAllConf> <maxPer> <maxPerAllConf>
+
+    Examples:
+        >>> python3 EPCPGrowth.py sampleTDB.txt patterns.txt 0.3 0.5 0.4 2.0
+
+    **Sample run of importing the code:**
+    ----------------------------------------
+    .. code-block:: python
+
+        from PAMI.periodicCorrelatedPattern.basic import EPCPGrowth as alg
+
+        obj = alg.EPCPGrowth(iFile, minSup, minAllConf, maxPer, maxPerAllConf)
+
+        obj.startMine()
+
+        periodicCorrelatedPatterns = obj.getPatterns()
+
+        print("Total number of Periodic-Correlated Patterns:", len(periodicCorrelatedPatterns))
+
+        obj.save(oFile)
+
+        Df = obj.getPatternsAsDataFrame()
+
+        memUSS = obj.getMemoryUSS()
+
+        print("Total Memory in USS:", memUSS)
+
+        memRSS = obj.getMemoryRSS()
+
+        print("Total Memory in RSS", memRSS)
+
+        run = obj.getRuntime()
+
+        print("Total ExecutionTime in seconds:", run)
+
+    **Credits:**
+    --------------
+    The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.
+
+    """
+    _startTime = float()
+    _endTime = float()
+    _minSup = str()
+    _minAllConf = float()
+    _maxPer = float()
+    _maxPerAllConf = float()
+    _finalPatterns = {}
+    _iFile = " "
+    _oFile = " "
+    _sep = " "
+    _memoryUSS = float()
+    _memoryRSS = float()
+    _Database = []
+    _rank = {}
+    _rankedUp = {}
+    _lno = 0
+
+    def _creatingItemSets(self) -> None:
+        """
+        Storing the complete transactions of the database/input file in a database variable
+        """
+        self._Database = []
+        if isinstance(self._iFile, _ab._pd.DataFrame):
+            data, ts = [], []
+            if self._iFile.empty:
+                print("its empty..")
+            i = self._iFile.columns.values.tolist()
+            if 'TS' in i:
+                ts = self._iFile['TS'].tolist()
+            if 'Transactions' in i:
+                data = self._iFile['Transactions'].tolist()
+            for i in range(len(data)):
+                tr = [ts[i][0]]
+                tr = tr + data[i]
+                self._Database.append(tr)
+
+        if isinstance(self._iFile, str):
+            if _ab._validators.url(self._iFile):
+                data = _ab._urlopen(self._iFile)
+                for line in data:
+                    line.strip()
+                    line = line.decode("utf-8")
+                    temp = [i.rstrip() for i in line.split(self._sep)]
+                    temp = [x for x in temp if x]
+                    self._Database.append(temp)
+            else:
+                try:
+                    with open(self._iFile, 'r', encoding='utf-8') as f:
+                        for line in f:
+                            line.strip()
+                            temp = [i.rstrip() for i in line.split(self._sep)]
+                            temp = [x for x in temp if x]
+                            self._Database.append(temp)
+                except IOError:
+                    print("File Not Found")
+                    quit()
+
+    def _periodicFrequentOneItem(self) -> tuple:
+        """
+        Calculates the support of each item in the database, assigns ranks to the items
+        by decreasing support, and returns the frequent items list
+
+        :returns: the one-length periodic frequent patterns
+        """
+        global _frequentList
+        data = {}
+        for tr in self._Database:
+            for i in range(1, len(tr)):
+                if tr[i] not in data:
+                    data[tr[i]] = [int(tr[0]), int(tr[0]), 1]
+                else:
+                    data[tr[i]][0] = max(data[tr[i]][0], (int(tr[0]) - data[tr[i]][1]))
+                    data[tr[i]][1] = int(tr[0])
+                    data[tr[i]][2] += 1
+        for key in data:
+            data[key][0] = max(data[key][0], abs(len(self._Database) - data[key][1]))
+        data = {k: [v[2], v[0], 1, 1] for k, v in data.items() if v[0] <= self._maxPer and v[2] >= self._minSup}
+        pfList = [k for k, v in sorted(data.items(), key=lambda x: (x[1][0], x[0]), reverse=True)]
+        self._rank = dict([(index, item) for (item, index) in enumerate(pfList)])
+        for x, y in self._rank.items():
+            _frequentList[y] = data[x]
+        return data, pfList
+
+    def _updateDatabases(self, dict1) -> list:
+        """
+        Removes the items which are not frequent from the database and updates the database with the rank of items
+
+        :param dict1: frequent items with support
+        :type dict1: dictionary
+        :return: sorted and updated transactions
+        """
+        list1 = []
+        for tr in self._Database:
+            list2 = [int(tr[0])]
+            for i in range(1, len(tr)):
+                if tr[i] in dict1:
+                    list2.append(self._rank[tr[i]])
+            if len(list2) >= 2:
+                basket = list2[1:]
+                basket.sort()
+                list2[1:] = basket[0:]
+                list1.append(list2)
+        return list1
+
+    @staticmethod
+    def _buildTree(data, info) -> _Tree:
+        """
+        It takes the database and support of each item and constructs the main tree by setting root node as null
+
+        :param data: it represents the one Database in database
+        :type data: list
+        :param info: it represents the support of each item
+        :type info: dictionary
+        :return: returns root node of tree
+        """
+
+        rootNode = _Tree()
+        rootNode.info = info.copy()
+        for i in range(len(data)):
+            set1 = [data[i][0]]
+            rootNode.addTransaction(data[i][1:], set1)
+        return rootNode
+
+    def _savePeriodic(self, itemSet) -> str:
+        """
+        To convert the ranks of items in to their original item names
+
+        :param itemSet: frequent pattern.
+        :return: frequent pattern with original item names
+        """
+        t1 = str()
+        for i in itemSet:
+            t1 = t1 + self._rankedUp[i] + "\t"
+        return t1
+
+    def _convert(self, value) -> float:
+        """
+        To convert the given user specified value
+
+        :param value: user specified value
+        :return: converted value
+        """
+        if type(value) is int:
+            value = int(value)
+        if type(value) is float:
+            value = (len(self._Database) * value)
+        if type(value) is str:
+            if '.' in value:
+                value = float(value)
+                value = (len(self._Database) * value)
+            else:
+                value = int(value)
+        return value
+
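+    # A small sketch (not part of the original source) of the rank/inverse-rank
+    # bookkeeping used by _updateDatabases and _savePeriodic above: items are
+    # replaced by their position in the support-descending list while mining, and
+    # mapped back through _rankedUp when patterns are reported. With a hypothetical
+    # support-descending list pfList = ['b', 'a', 'c']:
+    #
+    #     _rank     = {'b': 0, 'a': 1, 'c': 2}
+    #     _rankedUp = {0: 'b', 1: 'a', 2: 'c'}
+    #     ranked pattern [0, 2]  ->  "b\tc\t" after _savePeriodic
+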
+[docs]
+    def startMine(self) -> None:
+        """
+        Mining process will start from this function
+        """
+
+        global _minSup, _maxPer, _minAllConf, _maxPerAllConf, _lno
+        self._startTime = _ab._time.time()
+        if self._iFile is None:
+            raise Exception("Please enter the file path or file name:")
+        if self._minSup is None:
+            raise Exception("Please enter the Minimum Support")
+        self._creatingItemSets()
+        self._minSup = self._convert(self._minSup)
+        self._minAllConf = float(self._minAllConf)
+        self._maxPer = self._convert(self._maxPer)
+        self._maxPerAllConf = float(self._maxPerAllConf)
+        _minSup, _minAllConf, _maxPer, _maxPerAllConf, _lno = self._minSup, self._minAllConf, self._maxPer, self._maxPerAllConf, len(self._Database)
+        if self._minSup > len(self._Database):
+            raise Exception("minSup cannot exceed the number of transactions in the database")
+        generatedItems, pfList = self._periodicFrequentOneItem()
+        updatedDatabases = self._updateDatabases(generatedItems)
+        for x, y in self._rank.items():
+            self._rankedUp[y] = x
+        info = {self._rank[k]: v for k, v in generatedItems.items()}
+        Tree = self._buildTree(updatedDatabases, info)
+        patterns = Tree.generatePatterns([])
+        self._finalPatterns = {}
+        for i in patterns:
+            sample = self._savePeriodic(i[0])
+            self._finalPatterns[sample] = i[1]
+        self._endTime = _ab._time.time()
+        process = _ab._psutil.Process(_ab._os.getpid())
+        self._memoryUSS = process.memory_full_info().uss
+        self._memoryRSS = process.memory_info().rss
+        print("Correlated Periodic-Frequent patterns were generated successfully using EPCPGrowth algorithm")
+ + +
+[docs] + def getMemoryUSS(self) -> float: + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self) -> float: + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self) -> float: + """ + Calculating the total amount of runtime taken by the mining process + + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self) -> pd.DataFrame: + """ + Storing final periodic-frequent patterns in a dataframe + + :return: returning periodic-frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataFrame = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a.replace('\t', ' '), b[0], b[1], b[2], b[3]]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support', 'Periodicity', 'allConf', 'maxPerAllConf']) + return dataFrame
+ + +
+[docs]
+    def save(self, outFile: str) -> None:
+        """
+        Complete set of periodic-frequent patterns will be written into an output file
+
+        :param outFile: name of the output file
+        :type outFile: csv file
+        """
+        self._oFile = outFile
+        writer = open(self._oFile, 'w+')
+        for x, y in self._finalPatterns.items():
+            s1 = x.strip() + ":" + str(y[0]) + ":" + str(y[1]) + ":" + str(y[2]) + ":" + str(y[3])
+            writer.write("%s \n" % s1)
+        writer.close()
+ + +
+[docs] + def getPatterns(self) -> dict: + """ Function to send the set of periodic-frequent patterns after completion of the mining process + + :return: returning periodic-frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs]
+    def printResults(self) -> None:
+        """
+        This function is used to print the results
+        """
+        print("Total number of Correlated Periodic-Frequent Patterns:", len(self.getPatterns()))
+        print("Total Memory in USS:", self.getMemoryUSS())
+        print("Total Memory in RSS:", self.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", self.getRuntime())
+
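+# A hedged usage sketch (not part of the original source; file name and thresholds
+# are placeholders) following the parameter order of the __main__ block beneath:
+# EPCPGrowth(iFile, minSup, minAllConf, maxPer, maxPerAllConf[, sep]).
+#
+#     obj = EPCPGrowth("sampleTDB.txt", 0.3, 0.5, 0.4, 2.0)
+#     obj.startMine()
+#     obj.printResults()
+#     obj.save("patterns.txt")
+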
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_ab._sys.argv) == 7 or len(_ab._sys.argv) == 8:
+        if len(_ab._sys.argv) == 8:
+            _ap = EPCPGrowth(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5], _ab._sys.argv[6], _ab._sys.argv[7])
+        if len(_ab._sys.argv) == 7:
+            _ap = EPCPGrowth(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5], _ab._sys.argv[6])
+        _ap.startMine()
+        print("Total number of Correlated Periodic-Frequent Patterns:", len(_ap.getPatterns()))
+        _ap.save(_ab._sys.argv[2])
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS:", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        print("Error! The number of input parameters does not match the total number of parameters provided")
+
\ No newline at end of file
diff --git a/sphinx/_build/html/_modules/PAMI/periodicFrequentPattern/basic/PFECLAT.html b/sphinx/_build/html/_modules/PAMI/periodicFrequentPattern/basic/PFECLAT.html
new file mode 100644
index 000000000..49afcf2cc
--- /dev/null
+++ b/sphinx/_build/html/_modules/PAMI/periodicFrequentPattern/basic/PFECLAT.html
@@ -0,0 +1,598 @@
+PAMI.periodicFrequentPattern.basic.PFECLAT — PAMI 2024.04.23 documentation

Source code for PAMI.periodicFrequentPattern.basic.PFECLAT

+# PFECLAT is the fundamental approach to mine the periodic-frequent patterns.
+#
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#
+#             from PAMI.periodicFrequentPattern.basic import PFECLAT as alg
+#
+#             obj = alg.PFECLAT("../basic/sampleTDB.txt", "2", "5")
+#
+#             obj.startMine()
+#
+#             periodicFrequentPatterns = obj.getPatterns()
+#
+#             print("Total number of Periodic Frequent Patterns:", len(periodicFrequentPatterns))
+#
+#             obj.save("patterns")
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+
+__copyright__ = """
+ Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+from PAMI.periodicFrequentPattern.basic import abstract as _ab
+import pandas as pd
+from deprecated import deprecated
+import numpy as np
+
+
+
+
+[docs]
+class PFECLAT(_ab._periodicFrequentPatterns):
+    """
+    :Description: PFECLAT is the fundamental approach to mine the periodic-frequent patterns.
+
+    :Reference: P. Ravikumar, P. Likhitha, R. Uday Kiran, Y. Watanobe, and Koji Zettsu, "Towards efficient discovery of
+                periodic-frequent patterns in columnar temporal databases", 2021 IEA/AIE.
+
+    :param iFile: str :
+        Name of the Input file to mine complete set of periodic frequent patterns
+    :param oFile: str :
+        Name of the output file to store complete set of periodic frequent patterns
+    :param minSup: str :
+        Controls the minimum number of transactions in which every item must appear in a database.
+    :param maxPer: str :
+        Controls the maximum number of transactions in which any two items within a pattern can reappear.
+    :param sep: str :
+        This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override their default separator.
+
+    :Attributes:
+
+        iFile : file
+            Name of the Input file or path of the input file
+        oFile : file
+            Name of the output file or path of the output file
+        minSup : int or float or str
+            The user can specify minSup either in count or proportion of database size.
+            If the program detects the data type of minSup is integer, then it treats minSup as expressed in count.
+            Otherwise, it will be treated as float.
+            Example: minSup=10 will be treated as integer, while minSup=10.0 will be treated as float
+        maxPer : int or float or str
+            The user can specify maxPer either in count or proportion of database size.
+            If the program detects the data type of maxPer is integer, then it treats maxPer as expressed in count.
+            Otherwise, it will be treated as float.
+            Example: maxPer=10 will be treated as integer, while maxPer=10.0 will be treated as float
+        sep : str
+            This variable is used to distinguish items from one another in a transaction. The default separator is tab space or \t.
+            However, the users can override their default separator.
+        memoryUSS : float
+            To store the total amount of USS memory consumed by the program
+        memoryRSS : float
+            To store the total amount of RSS memory consumed by the program
+        startTime : float
+            To record the start time of the mining process
+        endTime : float
+            To record the completion time of the mining process
+        Database : list
+            To store the transactions of a database in list
+        mapSupport : Dictionary
+            To maintain the information of item and their frequency
+        lno : int
+            it represents the total no of transactions
+        tree : class
+            it represents the Tree class
+        itemSetCount : int
+            it represents the total no of patterns
+        finalPatterns : dict
+            it represents to store the patterns
+        tidList : dict
+            stores the timestamps of an item
+        hashing : dict
+            stores the patterns with their support to check for the closed property
+
+    :Methods:
+
+        startMine()
+            Mining process will start from here
+        getPatterns()
+            Complete set of patterns will be retrieved with this function
+        save(oFile)
+            Complete set of periodic-frequent patterns will be loaded into an output file
+        getPatternsAsDataFrame()
+            Complete set of periodic-frequent patterns will be loaded into a dataframe
+        getMemoryUSS()
+            Total amount of USS memory consumed by the mining process will be retrieved from this function
+        getMemoryRSS()
+            Total amount of RSS memory consumed by the mining process will be retrieved from this function
+        getRuntime()
+            Total amount of runtime taken by the mining process will be retrieved from this function
+        creatingOneItemSets()
+            Scan the database and store the items with their timestamps which are periodic frequent
+        getPeriodAndSupport()
+            Calculates the support and period for a list of timestamps.
+        Generation()
+            Used to implement prefix class equivalence method to generate the periodic patterns recursively
+
+    **Methods to execute code on terminal**
+    ------------------------------------------
+    .. code-block:: console
+
+      Format:
+
+      (.venv) $ python3 PFECLAT.py <inputFile> <outputFile> <minSup> <maxPer>
+
+      Example usage:
+
+      (.venv) $ python3 PFECLAT.py sampleDB.txt patterns.txt 10.0 5.0
+
+    .. note:: minSup will be considered in percentage of database transactions
+
+    **Importing this algorithm into a python program**
+    --------------------------------------------------------
+    .. code-block:: python
+
+        from PAMI.periodicFrequentPattern.basic import PFECLAT as alg
+
+        obj = alg.PFECLAT("../basic/sampleTDB.txt", "2", "5")
+
+        obj.startMine()
+
+        periodicFrequentPatterns = obj.getPatterns()
+
+        print("Total number of Periodic Frequent Patterns:", len(periodicFrequentPatterns))
+
+        obj.save("patterns")
+
+        Df = obj.getPatternsAsDataFrame()
+
+        memUSS = obj.getMemoryUSS()
+
+        print("Total Memory in USS:", memUSS)
+
+        memRSS = obj.getMemoryRSS()
+
+        print("Total Memory in RSS", memRSS)
+
+        run = obj.getRuntime()
+
+        print("Total ExecutionTime in seconds:", run)
+
+    **Credits:**
+    --------------
+    The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.
+ + """ + + _iFile = " " + _oFile = " " + _sep = " " + _dbSize = None + _Database = None + _minSup = str() + _maxPer = str() + _tidSet = set() + _finalPatterns = {} + _startTime = None + _endTime = None + _memoryUSS = float() + _memoryRSS = float() + + def _convert(self, value) -> float: + """ + To convert the given user specified value + + :param value: user specified value + :return: converted value + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (self._dbSize * value) + if type(value) is str: + if '.' in value: + value = float(value) + value = (self._dbSize * value) + else: + value = int(value) + return value + + def _creatingItemSets(self) -> None: + """ + Storing the complete transactions of the database/input file in a database variable + :return: None + """ + self._Database = [] + if isinstance(self._iFile, _ab._pd.DataFrame): + data, ts = [], [] + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'TS' in i: + ts = self._iFile['TS'].tolist() + if 'Transactions' in i: + data = self._iFile['Transactions'].tolist() + for i in range(len(data)): + tr = [ts[i][0]] + tr = tr + data[i] + self._Database.append(tr) + + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line.strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + except IOError: + print("File Not Found") + quit() + +
+[docs] + @deprecated("It is recommended to use mine() instead of startMine() for mining process") + def startMine(self) -> None: + """ + Mining process will start from this function + :return: None + """ + self.Mine()
+
+    def _getMaxPer(self, arr, maxTS):
+        """
+        To compute the periodicity (largest gap between consecutive timestamps,
+        including the gaps from 0 to the first and from the last to maxTS)
+
+        :param arr: timestamps of an itemset
+        :param maxTS: the largest timestamp in the database
+        :return: the maximum inter-occurrence gap
+        """
+        arr = np.append(list(arr), [0, maxTS])
+        arr = np.sort(arr)
+        arr = np.diff(arr)
+
+        return np.max(arr)
+
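+    # A worked example (not part of the original source) of _getMaxPer above:
+    # appending the virtual boundary timestamps 0 and maxTS before taking np.diff
+    # yields every inter-occurrence gap, including the leading and trailing ones.
+    #
+    #     arr = np.sort(np.append([4, 2, 7], [0, 10]))   # [0, 2, 4, 7, 10]
+    #     np.diff(arr)                                   # [2, 2, 3, 3]
+    #     np.max(np.diff(arr))                           # periodicity = 3
+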
+[docs]
+    def Mine(self) -> None:
+        """
+        Mining process will start from this function
+        :return: None
+        """
+        self._startTime = _ab._time.time()
+        self._finalPatterns = {}
+        self._creatingItemSets()
+
+        items = {}
+        maxTS = 0
+        for line in self._Database:
+            index = int(line[0])
+            maxTS = max(maxTS, index)
+            for item in line[1:]:
+                if tuple([item]) not in items:
+                    items[tuple([item])] = set()
+                items[tuple([item])].add(index)
+
+        self._dbSize = maxTS
+
+        self._minSup = self._convert(self._minSup)
+        self._maxPer = self._convert(self._maxPer)
+        minSup = self._minSup
+        maxPer = self._maxPer
+
+        items = {k: v for k, v in items.items() if len(v) >= minSup}
+        items = {k: v for k, v in sorted(items.items(), key=lambda x: len(x[1]), reverse=True)}
+
+        keys = []
+        for item in list(items.keys()):
+            per = self._getMaxPer(items[item], maxTS)
+            if per <= maxPer:
+                keys.append(item)
+                self._finalPatterns[item] = [len(items[item]), per, set(items[item])]
+
+        while keys:
+            newKeys = []
+            for i in range(len(keys)):
+                for j in range(i + 1, len(keys)):
+                    if keys[i][:-1] == keys[j][:-1] and keys[i][-1] != keys[j][-1]:
+                        newKey = tuple(keys[i] + (keys[j][-1],))
+                        intersect = items[keys[i]].intersection(items[keys[j]])
+                        per = self._getMaxPer(intersect, maxTS)
+                        sup = len(intersect)
+                        if sup >= minSup and per <= maxPer:
+                            items[newKey] = intersect
+                            newKeys.append(newKey)
+                            self._finalPatterns[newKey] = [sup, per, set(intersect)]
+                    else:
+                        break
+            keys = newKeys
+
+        newPattern = {}
+        for k, v in self._finalPatterns.items():
+            newPattern["\t".join([str(x) for x in k])] = v
+
+        self._finalPatterns = newPattern
+
+        self._endTime = _ab._time.time()
+        process = _ab._psutil.Process(_ab._os.getpid())
+        self._memoryUSS = process.memory_full_info().uss
+        self._memoryRSS = process.memory_info().rss
+        print("Periodic-Frequent patterns were generated successfully using PFECLAT algorithm")
+ + +
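+    # A small sketch (not part of the original source) of the tidset-intersection
+    # step Mine() uses above to extend patterns: the timestamps of a 2-itemset are
+    # the intersection of its items' tidsets, from which support (and, via
+    # _getMaxPer, periodicity) follow directly.
+    #
+    #     tidsets = {('a',): {1, 3, 5, 7}, ('b',): {1, 2, 3, 7}}   # hypothetical
+    #     tidsets[('a',)] & tidsets[('b',)]                        # {1, 3, 7}
+    #     support of ('a', 'b') = 3
+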
+[docs] + def getMemoryUSS(self) -> float: + """Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self) -> float: + """Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self) -> float: + """Calculating the total amount of runtime taken by the mining process + + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self) -> _ab._pd.DataFrame: + """ + Storing final periodic-frequent patterns in a dataframe + + :return: returning periodic-frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataframe = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a, b[0], b[1]]) + dataframe = _ab._pd.DataFrame(data, columns=['Patterns', 'Support', 'Periodicity']) + return dataframe
+ + +
+[docs]
+    def save(self, outFile: str) -> None:
+        """
+        Complete set of periodic-frequent patterns will be written into an output file
+
+        :param outFile: name of the output file
+        :type outFile: csv file
+        :return: None
+        """
+        self._oFile = outFile
+        writer = open(self._oFile, 'w+')
+        for x, y in self._finalPatterns.items():
+            s1 = x + ":" + str(y[0]) + ":" + str(y[1])
+            writer.write("%s \n" % s1)
+        writer.close()
+ + +
+[docs] + def getPatterns(self) -> dict: + """ + Function to send the set of periodic-frequent patterns after completion of the mining process + + :return: returning periodic-frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs]
+    def printResults(self) -> None:
+        """
+        This function is used to print the results
+        :return: None
+        """
+        print("Total number of Periodic Frequent Patterns:", len(self.getPatterns()))
+        print("Total Memory in USS:", self.getMemoryUSS())
+        print("Total Memory in RSS:", self.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_ab._sys.argv) == 5 or len(_ab._sys.argv) == 6:
+        if len(_ab._sys.argv) == 6:
+            _ap = PFECLAT(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5])
+        if len(_ab._sys.argv) == 5:
+            _ap = PFECLAT(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4])
+        _ap.startMine()
+        print("Total number of Periodic-Frequent Patterns:", len(_ap.getPatterns()))
+        _ap.save(_ab._sys.argv[2])
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS:", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        print("Error! The number of input parameters does not match the total number of parameters provided")
+
\ No newline at end of file
diff --git a/sphinx/_build/html/_modules/PAMI/periodicFrequentPattern/basic/PFPGrowth.html b/sphinx/_build/html/_modules/PAMI/periodicFrequentPattern/basic/PFPGrowth.html
new file mode 100644
index 000000000..5e5c5be68
--- /dev/null
+++ b/sphinx/_build/html/_modules/PAMI/periodicFrequentPattern/basic/PFPGrowth.html
@@ -0,0 +1,718 @@
+PAMI.periodicFrequentPattern.basic.PFPGrowth — PAMI 2024.04.23 documentation

Source code for PAMI.periodicFrequentPattern.basic.PFPGrowth

+# PFPGrowth is one of the fundamental algorithms to discover periodic-frequent patterns in a transactional database.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#
+#             from PAMI.periodicFrequentPattern.basic import PFPGrowth as alg
+#
+#             obj = alg.PFPGrowth(iFile, minSup, maxPer)
+#
+#             obj.startMine()
+#
+#             periodicFrequentPatterns = obj.getPatterns()
+#
+#             print("Total number of Periodic Frequent Patterns:", len(periodicFrequentPatterns))
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+__copyright__ = """
+ Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+from PAMI.periodicFrequentPattern.basic import abstract as _ab
+from typing import List, Dict, Tuple, Set, Union, Any, Generator
+
+import pandas as pd
+from deprecated import deprecated
+import numpy as np
+
+_maxPer = float()
+_minSup = float()
+_lno = int()
+
+
+class _Node(object):
+    """
+    A class used to represent the node of frequentPatternTree
+
+    :Attributes:
+
+        item : int or None
+            Storing item of a node
+        locations : list
+            To maintain the timestamps of the transactions that reach the node
+        parent : node
+            To maintain the parent of every node
+        children : list
+            To maintain the children of a node
+
+    :Methods:
+
+        addChild(item, locations)
+            Storing the children to their respective parent nodes
+        """
+
+    def __init__(self, item, locations, parent=None):
+        self.item = item
+        self.locations = locations
+        self.parent = parent
+        self.children = {}
+
+    def addChild(self, item, locations):
+        if item not in self.children:
+            self.children[item] = _Node(item, locations, self)
+        else:
+            self.children[item].locations = locations + self.children[item].locations
+            
+        return self.children[item]
+
+    def traverse(self):
+        transaction = []
+        locs = self.locations
+        node = self.parent
+        while node.parent is not None:
+            transaction.append(node.item)
+            node = node.parent
+        return transaction[::-1], locs
+
+
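+# A brief sketch (not part of the original source) of how _Node.traverse recovers
+# a conditional transaction: walking parent links from a node up to (but not
+# including) the root yields the prefix path, reversed back into transaction order.
+#
+#     root = _Node([], None, None)
+#     a = root.addChild('a', [1, 5])
+#     b = a.addChild('b', [1])
+#     b.traverse()                     # (['a'], [1])
+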
+[docs]
+class PFPGrowth(_ab._periodicFrequentPatterns):
+    """
+    :Description: PFPGrowth is one of the fundamental algorithms to discover periodic-frequent patterns in a transactional database.
+
+    :Reference: Syed Khairuzzaman Tanbeer, Chowdhury Farhan, Byeong-Soo Jeong, and Young-Koo Lee, "Discovering Periodic-Frequent
+                Patterns in Transactional Databases", PAKDD 2009, https://doi.org/10.1007/978-3-642-01307-2_24
+
+    :param iFile: str :
+        Name of the Input file to mine complete set of periodic frequent patterns
+    :param oFile: str :
+        Name of the output file to store complete set of periodic frequent patterns
+    :param minSup: str :
+        Controls the minimum number of transactions in which every item must appear in a database.
+    :param maxPer: float :
+        Controls the maximum number of transactions in which any two items within a pattern can reappear.
+    :param sep: str :
+        This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override their default separator.
+
+    :Attributes:
+
+        iFile : file
+            Name of the Input file or path of the input file
+        oFile : file
+            Name of the output file or path of the output file
+        minSup : int or float or str
+            The user can specify minSup either in count or proportion of database size.
+            If the program detects the data type of minSup is integer, then it treats minSup as expressed in count.
+            Otherwise, it will be treated as float.
+            Example: minSup=10 will be treated as integer, while minSup=10.0 will be treated as float
+        maxPer : int or float or str
+            The user can specify maxPer either in count or proportion of database size.
+            If the program detects the data type of maxPer is integer, then it treats maxPer as expressed in count.
+            Otherwise, it will be treated as float.
+            Example: maxPer=10 will be treated as integer, while maxPer=10.0 will be treated as float
+        sep : str
+            This variable is used to distinguish items from one another in a transaction. The default separator is tab space or \t.
+            However, the users can override their default separator.
+        memoryUSS : float
+            To store the total amount of USS memory consumed by the program
+        memoryRSS : float
+            To store the total amount of RSS memory consumed by the program
+        startTime : float
+            To record the start time of the mining process
+        endTime : float
+            To record the completion time of the mining process
+        Database : list
+            To store the transactions of a database in list
+        mapSupport : Dictionary
+            To maintain the information of item and their frequency
+        lno : int
+            To represent the total no of transactions
+        tree : class
+            To represent the Tree class
+        itemSetCount : int
+            To represent the total no of patterns
+        finalPatterns : dict
+            To store the complete patterns
+
+    :Methods:
+
+        startMine()
+            Mining process will start from here
+        getPatterns()
+            Complete set of patterns will be retrieved with this function
+        save(oFile)
+            Complete set of periodic-frequent patterns will be loaded into an output file
+        getPatternsAsDataFrame()
+            Complete set of periodic-frequent patterns will be loaded into a dataframe
+        getMemoryUSS()
+            Total amount of USS memory consumed by the mining process will be retrieved from this function
+        getMemoryRSS()
+            Total amount of RSS memory consumed by the mining process will be retrieved from this function
+        getRuntime()
+            Total amount of runtime taken by the mining process will be retrieved from this function
+        creatingItemSets(fileName)
+            Scans the dataset and stores it in a list format
+        PeriodicFrequentOneItem()
+            Extracts the one-periodic-frequent patterns from database
+        updateDatabases()
+            Updates the database by removing aperiodic items and sorts the Database by item decreasing support
+        buildTree()
+            After updating the Database, remaining items will be added into the tree by setting root node as null
+        convert()
+            To convert the user specified value
+
+    **Credits:**
+    --------------
+    The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.
+ """ + _startTime = float() + _endTime = float() + _minSup = str() + _maxPer = float() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _sep = " " + _memoryUSS = float() + _memoryRSS = float() + _Database = [] + _rank = {} + _rankedUp = {} + _lno = 0 + + def _creatingItemSets(self) -> None: + """ + Storing the complete transactions of the database/input file in a database variable + :return: None + """ + self._Database = [] + if isinstance(self._iFile, _ab._pd.DataFrame): + data, ts = [], [] + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'TS' in i: + ts = self._iFile['TS'].tolist() + if 'Transactions' in i: + data = self._iFile['Transactions'].tolist() + for i in range(len(data)): + tr = [ts[i][0]] + tr = tr + data[i] + self._Database.append(tr) + + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line.strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + except IOError: + print("File Not Found") + quit() + + def _convert(self, value) -> int: + """ + To convert the given user specified value + + :param value: user specified value + :return: converted value + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self._Database) * value) + if type(value) is str: + if '.' in value: + value = float(value) + value = (len(self._Database) * value) + else: + value = int(value) + return value + +
+[docs] + @deprecated("It is recommended to use mine() instead of startMine() for mining process") + def startMine(self) -> None: + """ + Mining process will start from this function + :return: None + """ + + self.Mine()
+
+    def _getMaxPer(self, arr, maxTS):
+        """
+        To compute the periodicity (largest gap between consecutive timestamps,
+        including the gaps from 0 to the first and from the last to maxTS)
+        """
+        arr = np.append(arr, [0, maxTS])
+        arr = np.sort(arr)
+        arr = np.diff(arr)
+
+        return np.max(arr)
+
+    def _construct(self, items, data, minSup, maxPer, maxTS, patterns):
+        """
+        Constructs the initial tree from the one-length periodic-frequent items and
+        records those items as patterns
+        """
+        items = {k: v for k, v in items.items() if len(v) >= minSup and self._getMaxPer(v, maxTS) <= maxPer}
+
+        for item, ts in items.items():
+            patterns[tuple([item])] = [len(ts), self._getMaxPer(ts, maxTS)]
+
+        root = _Node([], None, None)
+        itemNodes = {}
+        for line in data:
+            currNode = root
+            index = int(line[0])
+            line = line[1:]
+            line = sorted([item for item in line if item in items], key=lambda x: len(items[x]), reverse=True)
+            for item in line:
+                currNode = currNode.addChild(item, [index])
+                if item in itemNodes:
+                    itemNodes[item].add(currNode)
+                else:
+                    itemNodes[item] = set([currNode])
+
+        return root, itemNodes
+
+    def _recursive(self, root, itemNode, minSup, maxPer, patterns, maxTS):
+        """
+        Recursively mines the conditional trees of every item and records the
+        discovered periodic-frequent patterns
+        """
+        for item in itemNode:
+            newRoot = _Node(root.item + [item], None, None)
+
+            itemLocs = {}
+            transactions = {}
+            for node in itemNode[item]:
+                transaction, locs = node.traverse()
+                if len(transaction) < 1:
+                    continue
+                if tuple(transaction) in transactions:
+                    transactions[tuple(transaction)].extend(locs)
+                else:
+                    transactions[tuple(transaction)] = locs
+
+                for transItem in transaction:
+                    if transItem in itemLocs:
+                        itemLocs[transItem] += locs
+                    else:
+                        itemLocs[transItem] = list(locs)
+
+            # Precompute _getMaxPer results for the candidate extensions
+            maxPerResults = {k: self._getMaxPer(itemLocs[k], maxTS) for k in itemLocs if len(itemLocs[k]) >= minSup}
+
+            # Filter itemLocs based on minSup and maxPer
+            itemLocs = {k: len(v) for k, v in itemLocs.items() if k in maxPerResults and maxPerResults[k] <= maxPer}
+
+            # Record every surviving extension as a pattern
+            for ext in itemLocs:
+                patterns[tuple(newRoot.item + [ext])] = [itemLocs[ext], maxPerResults[ext]]
+
+            if not itemLocs:
+                continue
+
+            newItemNodes = {}
+
+            for transaction, locs in transactions.items():
+                transaction = sorted([t for t in transaction if t in itemLocs], key=lambda x: itemLocs[x], reverse=True)
+                if len(transaction) < 1:
+                    continue
+                currNode = newRoot
+                for t in transaction:
+                    currNode = currNode.addChild(t, locs)
+                    if t in newItemNodes:
+                        newItemNodes[t].add(currNode)
+                    else:
+                        newItemNodes[t] = set([currNode])
+
+            self._recursive(newRoot, newItemNodes, minSup, maxPer, patterns, maxTS)
+
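+    # A small sketch (not part of the original source) of what one level of
+    # _recursive does for a suffix item 'c': every node labelled 'c' contributes
+    # its prefix path and locations, e.g.
+    #
+    #     paths to 'c':  (['a', 'b'], [1, 5])  and  (['a'], [3])
+    #     itemLocs:      {'a': [1, 5, 3], 'b': [1, 5]}
+    #
+    # items surviving minSup/maxPer become patterns extending the current prefix
+    # and seed the next conditional tree.
+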
+[docs]
+    def Mine(self) -> None:
+        """
+        Mining process will start from this function
+        :return: None
+        """
+
+        global _minSup, _maxPer, _lno
+        self._startTime = _ab._time.time()
+        if self._iFile is None:
+            raise Exception("Please enter the file path or file name:")
+        if self._minSup is None:
+            raise Exception("Please enter the Minimum Support")
+        if self._maxPer is None:
+            raise Exception("Please enter the Maximum Periodicity")
+        if self._sep is None:
+            raise Exception("The default separator is tab space; please specify the separator if the input file uses a different one")
+
+        self._creatingItemSets()
+        self._minSup = self._convert(self._minSup)
+        self._maxPer = self._convert(self._maxPer)
+        _minSup, _maxPer, _lno = self._minSup, self._maxPer, len(self._Database)
+        if self._minSup > len(self._Database):
+            raise Exception("minSup cannot exceed the number of transactions; specify it as a count or as a proportion between 0 and 1")
+
+        items = {}
+        # collect the timestamps of every item
+        for line in self._Database:
+            index = int(line[0])
+            for item in line[1:]:
+                if item not in items:
+                    items[item] = []
+                items[item].append(index)
+
+        root, itemNodes = self._construct(items, self._Database, _minSup, _maxPer, _lno, self._finalPatterns)
+        self._recursive(root, itemNodes, _minSup, _maxPer, self._finalPatterns, _lno)
+
+        # patterns are mined as tuples of items; flatten the keys into strings
+        newPattern = {}
+        for k, v in self._finalPatterns.items():
+            newPattern["\t".join([str(x) for x in k])] = v
+        self._finalPatterns = newPattern
+
+        self._endTime = _ab._time.time()
+        process = _ab._psutil.Process(_ab._os.getpid())
+        self._memoryUSS = process.memory_full_info().uss
+        self._memoryRSS = process.memory_info().rss
+        print("Periodic Frequent patterns were generated successfully using PFPGrowth algorithm")
+ + +
+[docs] + def getMemoryUSS(self) -> float: + """Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self) -> float: + """Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self) -> float: + """Calculating the total amount of runtime taken by the mining process + + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self) -> _ab._pd.DataFrame: + """ + Storing final periodic-frequent patterns in a dataframe + + :return: returning periodic-frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataFrame = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a, b[0], b[1]]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support', 'Periodicity']) + return dataFrame
+ + +
+[docs]
+    def save(self, outFile: str) -> None:
+        """
+        Complete set of periodic-frequent patterns will be written to an output file
+
+        :param outFile: name of the output file
+        :type outFile: csv file
+        :return: None
+        """
+        self._oFile = outFile
+        with open(self._oFile, 'w+') as writer:
+            for x, y in self._finalPatterns.items():
+                s1 = x + ":" + str(y[0]) + ":" + str(y[1])
+                writer.write("%s\n" % s1)
+ + +
+[docs] + def getPatterns(self) -> Dict[str, Tuple[int, int]]: + """ + Function to send the set of periodic-frequent patterns after completion of the mining process + + :return: returning periodic-frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs]
+    def printResults(self) -> None:
+        """
+        This function is used to print the results
+        :return: None
+        """
+        print("Total number of Periodic Frequent Patterns:", len(self.getPatterns()))
+        print("Total Memory in USS:", self.getMemoryUSS())
+        print("Total Memory in RSS:", self.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_ab._sys.argv) == 5 or len(_ab._sys.argv) == 6:
+        if len(_ab._sys.argv) == 6:
+            _ap = PFPGrowth(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5])
+        if len(_ab._sys.argv) == 5:
+            _ap = PFPGrowth(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4])
+        _ap.startMine()
+        print("Total number of Periodic-Frequent Patterns:", len(_ap.getPatterns()))
+        _ap.save(_ab._sys.argv[2])
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS:", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        print("Error! The number of input parameters does not match the total number of parameters provided")
+
+"""
+**Methods to execute code on terminal**
+--------------------------------------------
+.. code-block:: console
+
+  Format:
+
+  (.venv) $ python3 PFPGrowth.py <inputFile> <outputFile> <minSup> <maxPer>
+
+  Example:
+
+  (.venv) $ python3 PFPGrowth.py sampleTDB.txt patterns.txt 0.3 0.4
+
+.. note:: minSup will be considered in percentage of database transactions
+
+**Importing this algorithm into a python program**
+---------------------------------------------------
+.. code-block:: python
+
+    from PAMI.periodicFrequentPattern.basic import PFPGrowth as alg
+
+    obj = alg.PFPGrowth(iFile, minSup, maxPer)
+
+    obj.startMine()
+
+    periodicFrequentPatterns = obj.getPatterns()
+
+    print("Total number of Periodic Frequent Patterns:", len(periodicFrequentPatterns))
+
+    obj.save(oFile)
+
+    Df = obj.getPatternsAsDataFrame()
+
+    memUSS = obj.getMemoryUSS()
+
+    print("Total Memory in USS:", memUSS)
+
+    memRSS = obj.getMemoryRSS()
+
+    print("Total Memory in RSS", memRSS)
+
+    run = obj.getRuntime()
+
+    print("Total ExecutionTime in seconds:", run)
+"""
+
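+# A minimal usage sketch with a pandas DataFrame input (hypothetical data;
+# _creatingItemSets expects 'TS' and 'Transactions' columns, each TS entry
+# being indexable so that ts[i][0] yields the timestamp):
+#
+#   import pandas as pd
+#   from PAMI.periodicFrequentPattern.basic import PFPGrowth as alg
+#   df = pd.DataFrame({'TS': [[1], [2], [3]],
+#                      'Transactions': [['a', 'b'], ['a', 'c'], ['a', 'b']]})
+#   obj = alg.PFPGrowth(df, 2, 2)
+#   obj.Mine()
+#   print(obj.getPatterns())
+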
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/periodicFrequentPattern/basic/PFPGrowthPlus.html b/sphinx/_build/html/_modules/PAMI/periodicFrequentPattern/basic/PFPGrowthPlus.html new file mode 100644 index 000000000..b7a7fcea2 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/periodicFrequentPattern/basic/PFPGrowthPlus.html @@ -0,0 +1,839 @@ + + + + + + PAMI.periodicFrequentPattern.basic.PFPGrowthPlus — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +

Source code for PAMI.periodicFrequentPattern.basic.PFPGrowthPlus

+# PFPGrowthPlus is a fundamental, improved version of the PFPGrowth algorithm for discovering periodic-frequent patterns in a temporal database.
+# It uses a greedy approach to discover patterns effectively.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#
+#             from PAMI.periodicFrequentPattern.basic import PFPGrowthPlus as alg
+#
+#             obj = alg.PFPGrowthPlus("../basic/sampleTDB.txt", "2", "6")
+#
+#             obj.startMine()
+#
+#             periodicFrequentPatterns = obj.getPatterns()
+#
+#             print("Total number of Periodic Frequent Patterns:", len(periodicFrequentPatterns))
+#
+#             obj.save("patterns")
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+__copyright__ = """
+ Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+from PAMI.periodicFrequentPattern.basic import abstract as _ab
+from deprecated import deprecated
+from typing import List, Dict, Tuple, Set, Union, Any, Generator
+
+_maxPer = float()
+_minSup = float()
+_lno = int()
+
+
+class _Node(object):
+    """
+    A class used to represent the node of frequentPatternTree
+
+    :Attributes:
+
+        item : int
+            storing item of a node
+        timeStamps : list
+            To maintain the timestamps of transaction at the end of the branch
+        parent : node
+            To maintain the parent of every node
+        children : list
+            To maintain the children of node
+
+    :Methods:
+
+        addChild(itemName)
+            storing the children to their respective parent nodes
+    """
+
+    def __init__(self, item, children) -> None:
+        self.item = item
+        self.children = children
+        self.parent = None
+        self.timeStamps = []
+
+    def addChild(self, node) -> None:
+        self.children[node.item] = node
+        node.parent = self
+
+
+class _Tree(object):
+    """
+    A class used to represent the frequentPatternGrowth tree structure
+
+    :Attributes:
+
+        root : Node
+            Represents the root node of the tree
+        summaries : dictionary
+            storing the nodes with same item name
+        info : dictionary
+            stores the support of items
+
+    :Methods:
+
+        addTransaction(transaction)
+            creating transaction as a branch in frequentPatternTree
+        getConditionalPatterns(Node)
+            generates the conditional patterns from tree for specific node
+        conditionalTransactions(prefixPaths,Support)
+            takes the prefixPath of a node and support at child of the path and extract the frequent items from
+            prefixPaths and generates prefixPaths with items which are frequent
+        remove(Node)
+            removes the node from tree once after generating all the patterns respective to the node
+        generatePatterns(Node)
+            starts from the root node of the tree and mines the periodic-frequent patterns
+    """
+
+    def __init__(self) -> None:
+        self.root = _Node(None, {})
+        self.summaries = {}
+        self.info = {}
+
+    def addTransaction(self, transaction, tid) -> None:
+        """
+        adding transaction into tree
+
+        :param transaction : it represents the one transaction in database
+        :type transaction : list
+        :param tid : represents the timestamp of transaction
+        :type tid : list
+        :return: None
+        """
+        currentNode = self.root
+        for i in range(len(transaction)):
+            if transaction[i] not in currentNode.children:
+                newNode = _Node(transaction[i], {})
+                currentNode.addChild(newNode)
+                if transaction[i] in self.summaries:
+                    self.summaries[transaction[i]].append(newNode)
+                else:
+                    self.summaries[transaction[i]] = [newNode]
+                currentNode = newNode
+            else:
+                currentNode = currentNode.children[transaction[i]]
+        currentNode.timeStamps = currentNode.timeStamps + tid
+
+    def getConditionalPatterns(self, alpha) -> Tuple[List[List], List[List], Dict]:
+        """
+        generates all the conditional patterns of respective node
+
+        :param alpha : it represents the Node in tree
+        :type alpha : Node
+        """
+        finalPatterns = []
+        finalSets = []
+        for i in self.summaries[alpha]:
+            set1 = i.timeStamps
+            set2 = []
+            while i.parent.item is not None:
+                set2.append(i.parent.item)
+                i = i.parent
+            if len(set2) > 0:
+                set2.reverse()
+                finalPatterns.append(set2)
+                finalSets.append(set1)
+        finalPatterns, finalSets, info = self.conditionalTransactions(finalPatterns, finalSets)
+        return finalPatterns, finalSets, info
+
+    @staticmethod
+    def generateTimeStamps(node) -> List:
+        finalTimeStamps = node.timeStamps
+        return finalTimeStamps
+
+    def removeNode(self, nodeValue):
+        """
+        removing the node from tree
+
+        :param nodeValue: it represents the node in tree
+        :type nodeValue: node
+        """
+        for i in self.summaries[nodeValue]:
+            i.parent.timeStamps = i.parent.timeStamps + i.timeStamps
+            del i.parent.children[nodeValue]
+
+    def getTimeStamps(self, alpha):
+        """
+        to get all the timestamps related to a node in tree
+
+        :param alpha: node of a tree
+        :return: timestamps of a node
+        """
+        temporary = []
+        for i in self.summaries[alpha]:
+            temporary += i.timeStamps
+        return temporary
+
+    @staticmethod
+    def getSupportAndPeriod(timeStamps):
+        """
+        calculates the support and periodicity with list of timestamps
+
+        :param timeStamps : timestamps of a pattern.
+        :type timeStamps : list
+        """
+        global _maxPer, _lno
+        timeStamps.sort()
+        cur = 0
+        per = 0
+        sup = 0
+        for j in range(len(timeStamps)):
+            per = max(per, timeStamps[j] - cur)
+            if per > _maxPer:
+                return [0, 0]
+            cur = timeStamps[j]
+            sup += 1
+        per = max(per, _lno - cur)
+        return [sup, per]
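+    # Worked example (hypothetical values, assuming _lno = 10 and _maxPer = 4):
+    # for timeStamps [1, 3, 7] the gaps examined are 1, 2 and 4, plus the tail
+    # gap _lno - 7 = 3, so the method returns [sup=3, per=4]; had any gap
+    # exceeded _maxPer, it would have returned [0, 0] early.
+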
+
+    def conditionalTransactions(self, conditionalPatterns, conditionalTimeStamps):
+        """
+        It generates the conditional patterns with periodic frequent items
+
+        :param conditionalPatterns: conditional patterns generated from the getConditionalPatterns method for the respective node
+        :type conditionalPatterns: list
+        :param conditionalTimeStamps : represents the timestamps of conditional patterns of a node
+        :type conditionalTimeStamps : list
+        """
+        global _maxPer, _minSup
+        pat = []
+        timeStamps = []
+        data1 = {}
+        for i in range(len(conditionalPatterns)):
+            for j in conditionalPatterns[i]:
+                if j in data1:
+                    data1[j] = data1[j] + conditionalTimeStamps[i]
+                else:
+                    data1[j] = conditionalTimeStamps[i]
+        updatedDictionary = {}
+        for m in data1:
+            updatedDictionary[m] = self.getSupportAndPeriod(data1[m])
+        updatedDictionary = {k: v for k, v in updatedDictionary.items() if v[0] >= _minSup and v[1] <= _maxPer}
+        count = 0
+        for p in conditionalPatterns:
+            p1 = [v for v in p if v in updatedDictionary]
+            trans = sorted(p1, key=lambda x: (updatedDictionary.get(x)[0], -x), reverse=True)
+            if len(trans) > 0:
+                pat.append(trans)
+                timeStamps.append(conditionalTimeStamps[count])
+            count += 1
+        return pat, timeStamps, updatedDictionary
+
+    def generatePatterns(self, prefix):
+        """
+        generates the patterns
+
+        :param prefix : forms the combination of items
+        :type prefix : list
+        """
+        for i in sorted(self.summaries, key=lambda x: (self.info.get(x)[0], -x)):
+            pattern = prefix[:]
+            pattern.append(i)
+            yield pattern, self.info[i]
+            patterns, timeStamps, info = self.getConditionalPatterns(i)
+            conditionalTree = _Tree()
+            conditionalTree.info = info.copy()
+            for pat in range(len(patterns)):
+                conditionalTree.addTransaction(patterns[pat], timeStamps[pat])
+            if len(patterns) > 0:
+                for q in conditionalTree.generatePatterns(pattern):
+                    yield q
+            self.removeNode(i)
+
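+    # Consumption sketch: startMine() drives this generator roughly as
+    #
+    #   for pattern, info in tree.generatePatterns([]):
+    #       finalPatterns[savePeriodic(pattern)] = info  # info = [support, periodicity]
+    #
+    # where each yielded pattern is a list of item ranks that _savePeriodic
+    # later maps back to the original item names.
+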
+
+
+[docs]
+class PFPGrowthPlus(_ab._periodicFrequentPatterns):
+    """
+    :Description: PFPGrowthPlus is a fundamental, improved version of the PFPGrowth algorithm for discovering periodic-frequent patterns in a temporal database.
+                  It uses a greedy approach to discover patterns effectively.
+
+    :Reference: R. UdayKiran, MasaruKitsuregawa, and P. KrishnaReddyd, "Efficient discovery of periodic-frequent patterns in
+                very large databases," Journal of Systems and Software, February 2016, https://doi.org/10.1016/j.jss.2015.10.035
+
+    :param iFile: str :
+                  Name of the Input file to mine complete set of periodic frequent patterns
+    :param oFile: str :
+                  Name of the output file to store complete set of periodic frequent patterns
+    :param minSup: str :
+                  Controls the minimum number of transactions in which every item must appear in a database.
+    :param maxPer: str :
+                  Controls the maximum number of transactions in which any two items within a pattern can reappear.
+    :param sep: str :
+                  This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator.
+
+    :Attributes:
+
+        iFile : file
+            Name of the Input file or path of the input file
+        oFile : file
+            Name of the output file or path of the output file
+        minSup : int or float or str
+            The user can specify minSup either in count or proportion of database size.
+            If the program detects the data type of minSup is integer, then it treats minSup as expressed in count.
+            Otherwise, it will be treated as float.
+            Example: minSup=10 will be treated as integer, while minSup=10.0 will be treated as float
+        maxPer : int or float or str
+            The user can specify maxPer either in count or proportion of database size.
+            If the program detects the data type of maxPer is integer, then it treats maxPer as expressed in count.
+            Otherwise, it will be treated as float.
+            Example: maxPer=10 will be treated as integer, while maxPer=10.0 will be treated as float
+        sep : str
+            This variable is used to distinguish items from one another in a transaction. The default separator is tab space or \t.
+            However, the users can override the default separator.
+        memoryUSS : float
+            To store the total amount of USS memory consumed by the program
+        memoryRSS : float
+            To store the total amount of RSS memory consumed by the program
+        startTime : float
+            To record the start time of the mining process
+        endTime : float
+            To record the completion time of the mining process
+        Database : list
+            To store the transactions of a database in list
+        mapSupport : Dictionary
+            To maintain the information of item and their frequency
+        lno : int
+            it represents the total no of transactions
+        tree : class
+            it represents the Tree class
+        itemSetCount : int
+            it represents the total no of patterns
+        finalPatterns : dict
+            it represents to store the patterns
+
+    :Methods:
+
+        startMine()
+            Mining process will start from here
+        getPatterns()
+            Complete set of patterns will be retrieved with this function
+        save(oFile)
+            Complete set of periodic-frequent patterns will be written to an output file
+        getPatternsAsDataFrame()
+            Complete set of periodic-frequent patterns will be loaded in to a dataframe
+        getMemoryUSS()
+            Total amount of USS memory consumed by the mining process will be retrieved from this function
+        getMemoryRSS()
+            Total amount of RSS memory consumed by the mining process will be retrieved from this function
+        getRuntime()
+            Total amount of runtime taken by the mining process will be retrieved from this function
+        check(line)
+            To check the delimiter used in the user input file
+        creatingItemSets(fileName)
+            Scans the dataset or dataframes and stores in list format
+        PeriodicFrequentOneItem()
+            Extracts the one-periodic-frequent patterns from Databases
+        updateDatabases()
+            Updates the Database by removing aperiodic items and sorts it by decreasing item support
+        buildTree()
+            After updating, the Database is added into the tree by setting the root node as null
+
+    **Methods to execute code on terminal**
+    -------------------------------------------
+    .. code-block:: console
+
+      Format:
+
+      (.venv) $ python3 PFPGrowthPlus.py <inputFile> <outputFile> <minSup> <maxPer>
+
+      Example:
+
+      (.venv) $ python3 PFPGrowthPlus.py sampleTDB.txt patterns.txt 0.3 0.4
+
+    .. note:: minSup will be considered in percentage of database transactions
+
+    **Importing this algorithm into a python program**
+    -----------------------------------------------------------
+    .. code-block:: python
+
+        from PAMI.periodicFrequentPattern.basic import PFPGrowthPlus as alg
+
+        obj = alg.PFPGrowthPlus("../basic/sampleTDB.txt", "2", "6")
+
+        obj.startMine()
+
+        periodicFrequentPatterns = obj.getPatterns()
+
+        print("Total number of Periodic Frequent Patterns:", len(periodicFrequentPatterns))
+
+        obj.save("patterns")
+
+        Df = obj.getPatternsAsDataFrame()
+
+        memUSS = obj.getMemoryUSS()
+
+        print("Total Memory in USS:", memUSS)
+
+        memRSS = obj.getMemoryRSS()
+
+        print("Total Memory in RSS", memRSS)
+
+        run = obj.getRuntime()
+
+        print("Total ExecutionTime in seconds:", run)
+
+    **Credits:**
+    -----------------
+    The complete program was written by P. Likhitha under the supervision of Professor Rage Uday Kiran.
+ """ + + _minSup = str() + _maxPer = str() + _startTime = float() + _endTime = float() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _sep = " " + _memoryUSS = float() + _memoryRSS = float() + _Database = [] + _rank = {} + _rankedUp = {} + _lno = 0 + + def _creatingItemSets(self) -> None: + """ + Storing the complete transactions of the database/input file in a database variable + :return: None + """ + self._Database = [] + if isinstance(self._iFile, _ab._pd.DataFrame): + data, ts = [], [] + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'TS' in i: + ts = self._iFile['TS'].tolist() + if 'Transactions' in i: + data = self._iFile['Transactions'].tolist() + for i in range(len(data)): + tr = [ts[i][0]] + tr = tr + data[i] + self._Database.append(tr) + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line.strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + except IOError: + print("File Not Found") + quit() + + def _periodicFrequentOneItem(self) -> Tuple[Dict, List]: + """ + calculates the support of each item in the dataset and assign the ranks to the items + by decreasing support and returns the frequent items list + + """ + data = {} + for tr in self._Database: + n = int(tr[0]) + for i in range(1, len(tr)): + if n <= self._maxPer: + if tr[i] not in data: + data[tr[i]] = [int(tr[0]), int(tr[0]), 1] + else: + data[tr[i]][0] = max(data[tr[i]][0], (int(tr[0]) - data[tr[i]][1])) + data[tr[i]][1] = int(tr[0]) + data[tr[i]][2] += 1 + else: + if tr[i] in data: + lp = abs(n - data[tr[i]][1]) + if lp > self._maxPer: + del data[tr[i]] + else: + data[tr[i]][0] = max(data[tr[i]][0], lp) + data[tr[i]][1] = int(tr[0]) + data[tr[i]][2] += 1 + for key in data: + data[key][0] = max(data[key][0], _lno - data[key][1]) + data = {k: [v[2], v[0]] for k, v in data.items() if v[0] <= self._maxPer and v[2] >= self._minSup} + genList = [k for k, v in sorted(data.items(), key=lambda x: (x[1][0], x[0]), reverse=True)] + self._rank = dict([(index, item) for (item, index) in enumerate(genList)]) + # genList=[k for k,v in sorted(data.items(),key=lambda x: (x[1][0],x[0]),reverse=True)] + return data, genList + + def _updateTransactions(self, dict1) -> List: + """ + remove the items which are not frequent from transactions and updates the transactions with rank of items + + :param dict1 : frequent items with support + :type dict1 : dictionary + """ + list1 = [] + for tr in self._Database: + list2 = [int(tr[0])] + for i in range(1, len(tr)): + if tr[i] in dict1: + list2.append(self._rank[tr[i]]) + if len(list2) >= 2: + basket = list2[1:] + basket.sort() + list2[1:] = basket[0:] + list1.append(list2) + return list1 + + @staticmethod + def _buildTree(data, info) -> _Tree: + """ + It takes the transactions and support of each item and construct the main tree with setting root node as null + + :param data : it represents the one transaction in database + :type data : list + :param info : it represents the support of each item + :type info : dictionary + """ + rootNode = _Tree() + rootNode.info = info.copy() + for i in range(len(data)): + set1 = [data[i][0]] + 
rootNode.addTransaction(data[i][1:], set1) + return rootNode + + def _savePeriodic(self, itemSet) -> str: + """ + To convert item ranks into original item names + + :param itemSet: periodic-frequent pattern + :return: original itemSet + """ + t1 = str() + for i in itemSet: + t1 = t1 + self._rankedUp[i] + "\t" + return t1 + + def _convert(self, value) -> Union[int, float]: + """ + To convert the given user specified value + + :param value: user specified value + :return: converted value + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self._Database) * value) + if type(value) is str: + if '.' in value: + value = float(value) + value = (len(self._Database) * value) + else: + value = int(value) + return value + +
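+    # Illustrative sketch of the ranking step (hypothetical supports): if
+    # _periodicFrequentOneItem keeps items with supports {'a': 5, 'c': 4, 'b': 3},
+    # the sorted list ['a', 'c', 'b'] gives _rank = {'a': 0, 'c': 1, 'b': 2};
+    # _updateTransactions rewrites every transaction with these ranks, and
+    # _savePeriodic maps ranks back to item names via _rankedUp when saving.
+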
+[docs]
+    def startMine(self) -> None:
+        """
+        Main method where the patterns are mined by constructing the tree.
+        :return: None
+        """
+        global _minSup, _maxPer, _lno
+        self._startTime = _ab._time.time()
+        if self._iFile is None:
+            raise Exception("Please enter the file path or file name:")
+        if self._minSup is None:
+            raise Exception("Please enter the Minimum Support")
+        if self._maxPer is None:
+            raise Exception("Please enter the Maximum Periodicity")
+        self._creatingItemSets()
+        self._minSup = self._convert(self._minSup)
+        self._maxPer = self._convert(self._maxPer)
+        _minSup, _maxPer, _lno = self._minSup, self._maxPer, len(self._Database)
+        generatedItems, pfList = self._periodicFrequentOneItem()
+        updatedTransactions = self._updateTransactions(generatedItems)
+        for x, y in self._rank.items():
+            self._rankedUp[y] = x
+        info = {self._rank[k]: v for k, v in generatedItems.items()}
+        Tree = self._buildTree(updatedTransactions, info)
+        patterns = Tree.generatePatterns([])
+        self._finalPatterns = {}
+        for i in patterns:
+            x = self._savePeriodic(i[0])
+            self._finalPatterns[x] = i[1]
+        self._endTime = _ab._time.time()
+        process = _ab._psutil.Process(_ab._os.getpid())
+        self._memoryUSS = process.memory_full_info().uss
+        self._memoryRSS = process.memory_info().rss
+        print("Periodic-frequent patterns were generated successfully using the PFPGrowth++ algorithm")
+ + +
+[docs] + def getMemoryUSS(self) -> float: + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self) -> float: + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self) -> float: + """ + Calculating the total amount of runtime taken by the mining process + + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self) -> _ab._pd.DataFrame: + """ + Storing final periodic-frequent patterns in a dataframe + + :return: returning periodic-frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataframe = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a, b[0], b[1]]) + dataframe = _ab._pd.DataFrame(data, columns=['Patterns', 'Support', 'Periodicity']) + return dataframe
+ + +
+[docs]
+    def save(self, outFile: str) -> None:
+        """
+        Complete set of periodic-frequent patterns will be written to an output file
+
+        :param outFile: name of the output file
+        :type outFile: csv file
+        :return: None
+        """
+        self._oFile = outFile
+        with open(self._oFile, 'w+') as writer:
+            for x, y in self._finalPatterns.items():
+                s1 = x + ":" + str(y[0]) + ":" + str(y[1])
+                writer.write("%s\n" % s1)
+ + +
+[docs] + def getPatterns(self) -> Dict[str, Tuple[int, int]]: + """ + Function to send the set of periodic-frequent patterns after completion of the mining process + + :return: returning periodic-frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs]
+    def printResults(self) -> None:
+        """
+        This function is used to print the results
+        :return: None
+        """
+        print("Total number of Periodic Frequent Patterns:", len(self.getPatterns()))
+        print("Total Memory in USS:", self.getMemoryUSS())
+        print("Total Memory in RSS:", self.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_ab._sys.argv) == 5 or len(_ab._sys.argv) == 6:
+        if len(_ab._sys.argv) == 6:
+            _ap = PFPGrowthPlus(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5])
+        if len(_ab._sys.argv) == 5:
+            _ap = PFPGrowthPlus(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4])
+        _ap.startMine()
+        print("Total number of Periodic-Frequent Patterns:", len(_ap.getPatterns()))
+        _ap.save(_ab._sys.argv[2])
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS:", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        print("Error! The number of input parameters does not match the total number of parameters provided")
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/periodicFrequentPattern/basic/PFPMC.html b/sphinx/_build/html/_modules/PAMI/periodicFrequentPattern/basic/PFPMC.html new file mode 100644 index 000000000..603aa2f0d --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/periodicFrequentPattern/basic/PFPMC.html @@ -0,0 +1,627 @@ + + + + + + PAMI.periodicFrequentPattern.basic.PFPMC — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +

Source code for PAMI.periodicFrequentPattern.basic.PFPMC

+# PFPMC is a fundamental approach for mining periodic-frequent patterns.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#
+#             from PAMI.periodicFrequentPattern.basic import PFPMC as alg
+#
+#             obj = alg.PFPMC("../basic/sampleTDB.txt", "2", "5")
+#
+#             obj.startMine()
+#
+#             periodicFrequentPatterns = obj.getPatterns()
+#
+#             print("Total number of Periodic Frequent Patterns:", len(periodicFrequentPatterns))
+#
+#             obj.save("patterns")
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+__copyright__ = """
+ Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+from PAMI.periodicFrequentPattern.basic import abstract as _ab
+from deprecated import deprecated
+from itertools import groupby as _groupby
+from operator import itemgetter as _itemgetter
+from typing import List, Dict, Tuple, Set, Union, Any, Generator
+
+
+
+[docs]
+class PFPMC(_ab._periodicFrequentPatterns):
+    """
+    :Description: PFPMC is a fundamental approach for mining periodic-frequent patterns.
+
+    :Reference: (has to be added)
+
+    :param iFile: str :
+                  Name of the Input file to mine complete set of periodic frequent patterns
+    :param oFile: str :
+                  Name of the output file to store complete set of periodic frequent patterns
+    :param minSup: str :
+                  Controls the minimum number of transactions in which every item must appear in a database.
+    :param maxPer: str :
+                  Controls the maximum number of transactions in which any two items within a pattern can reappear.
+    :param sep: str :
+                  This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator.
+
+    :Attributes:
+
+        iFile : file
+            Name of the Input file or path of the input file
+        oFile : file
+            Name of the output file or path of the output file
+        minSup : int or float or str
+            The user can specify minSup either in count or proportion of database size.
+            If the program detects the data type of minSup is integer, then it treats minSup as expressed in count.
+            Otherwise, it will be treated as float.
+            Example: minSup=10 will be treated as integer, while minSup=10.0 will be treated as float
+        maxPer : int or float or str
+            The user can specify maxPer either in count or proportion of database size.
+            If the program detects the data type of maxPer is integer, then it treats maxPer as expressed in count.
+            Otherwise, it will be treated as float.
+            Example: maxPer=10 will be treated as integer, while maxPer=10.0 will be treated as float
+        sep : str
+            This variable is used to distinguish items from one another in a transaction. The default separator is tab space or \t.
+            However, the users can override the default separator.
+        memoryUSS : float
+            To store the total amount of USS memory consumed by the program
+        memoryRSS : float
+            To store the total amount of RSS memory consumed by the program
+        startTime : float
+            To record the start time of the mining process
+        endTime : float
+            To record the completion time of the mining process
+        Database : list
+            To store the transactions of a database in list
+        mapSupport : Dictionary
+            To maintain the information of item and their frequency
+        lno : int
+            it represents the total no of transactions
+        tree : class
+            it represents the Tree class
+        itemSetCount : int
+            it represents the total no of patterns
+        finalPatterns : dict
+            it represents to store the patterns
+        tidList : dict
+            stores the timestamps of an item
+        hashing : dict
+            stores the patterns with their support to check for the closed property
+
+    :Methods:
+
+        startMine()
+            Mining process will start from here
+        getPatterns()
+            Complete set of patterns will be retrieved with this function
+        save(oFile)
+            Complete set of periodic-frequent patterns will be written to an output file
+        getPatternsAsDataFrame()
+            Complete set of periodic-frequent patterns will be loaded in to a dataframe
+        getMemoryUSS()
+            Total amount of USS memory consumed by the mining process will be retrieved from this function
+        getMemoryRSS()
+            Total amount of RSS memory consumed by the mining process will be retrieved from this function
+        getRuntime()
+            Total amount of runtime taken by the mining process will be retrieved from this function
+        creatingOneItemSets()
+            Scan the database and store the items with their timestamps which are periodic frequent
+        getPeriodAndSupport()
+            Calculates the support and period for a list of timestamps.
+        Generation()
+            Used to implement prefix class equivalence method to generate the periodic patterns recursively
+
+    **Methods to execute code on terminal**
+    ------------------------------------------
+    .. code-block:: console
+
+      Format:
+
+      (.venv) $ python3 PFPMC.py <inputFile> <outputFile> <minSup> <maxPer>
+
+      Example usage:
+
+      (.venv) $ python3 PFPMC.py sampleDB.txt patterns.txt 10.0 4.0
+
+    .. note:: minSup and maxPer will be considered in percentage of database transactions
+
+    **Importing this algorithm into a python program**
+    ----------------------------------------------------
+    .. code-block:: python
+
+        from PAMI.periodicFrequentPattern.basic import PFPMC as alg
+
+        obj = alg.PFPMC("../basic/sampleTDB.txt", "2", "5")
+
+        obj.startMine()
+
+        periodicFrequentPatterns = obj.getPatterns()
+
+        print("Total number of Periodic Frequent Patterns:", len(periodicFrequentPatterns))
+
+        obj.save("patterns")
+
+        Df = obj.getPatternsAsDataFrame()
+
+        memUSS = obj.getMemoryUSS()
+
+        print("Total Memory in USS:", memUSS)
+
+        memRSS = obj.getMemoryRSS()
+
+        print("Total Memory in RSS", memRSS)
+
+        run = obj.getRuntime()
+
+        print("Total ExecutionTime in seconds:", run)
+
+    **Credits:**
+    ----------------
+
+    The complete program was written by P. Likhitha under the supervision of Professor Rage Uday Kiran.
+
+    """
+
+    _iFile = " "
+    _oFile = " "
+    _sep = " "
+    _dbSize = None
+    _Database = None
+    _minSup = str()
+    _maxPer = str()
+    _tidSet = set()
+    _finalPatterns = {}
+    _startTime = None
+    _endTime = None
+    _lastTid = int()
+    _memoryUSS = float()
+    _memoryRSS = float()
+
+    def _getPeriodic(self, tids: set) -> int:
+        """
+        Computes the periodicity of a pattern from its diffset, i.e. the timestamps
+        in which the pattern does NOT appear: the longest run of consecutive missing
+        timestamps plus one is the largest gap between two occurrences.
+
+        :param tids: timestamps in which the pattern is absent (diffset)
+        :type tids: set
+        :return: periodicity of the pattern
+        """
+        tids = list(tids)
+        tids.sort()
+        temp = self._maxPer + 1
+        diffs = []
+        if self._lastTid in tids:
+            tids.remove(self._lastTid)
+        # index minus value is constant within a run of consecutive integers,
+        # so groupby splits the diffset into maximal consecutive runs
+        for k, g in _groupby(enumerate(tids), lambda ix: ix[0] - ix[1]):
+            diffs.append(len(list(map(_itemgetter(1), g))))
+        if len(diffs) < 1:
+            return temp
+        return max(diffs) + 1
+
+    def _convert(self, value) -> Union[int, float]:
+        """
+        To convert the given user specified value
+
+        :param value: user specified value
+        :return: converted value
+        """
+        if type(value) is int:
+            value = int(value)
+        if type(value) is float:
+            value = (self._dbSize * value)
+        if type(value) is str:
+            if '.' in value:
+                value = float(value)
+                value = (self._dbSize * value)
+            else:
+                value = int(value)
+        return value
+
+    def _creatingOneItemSets(self) -> list:
+        """
+        Storing the complete transactions of the database/input file in a database variable
+        :return: list
+        """
+        Database = []
+        if isinstance(self._iFile, _ab._pd.DataFrame):
+            ts, data = [], []
+            if self._iFile.empty:
+                print("The input DataFrame is empty")
+            i = self._iFile.columns.values.tolist()
+            if 'TS' in i:
+                ts = self._iFile['TS'].tolist()
+            if 'Transactions' in i:
+                data = self._iFile['Transactions'].tolist()
+            for i in range(len(data)):
+                tr = [ts[i][0]]
+                tr = tr + data[i]
+                Database.append(tr)
+        if isinstance(self._iFile, str):
+            if _ab._validators.url(self._iFile):
+                data = _ab._urlopen(self._iFile)
+                for line in data:
+                    line = line.decode("utf-8").strip()
+                    temp = [i.rstrip() for i in line.split(self._sep)]
+                    temp = [x for x in temp if x]
+                    Database.append(temp)
+            else:
+                try:
+                    with open(self._iFile, 'r', encoding='utf-8') as f:
+                        for line in f:
+                            line = line.strip()
+                            temp = [i.rstrip() for i in line.split(self._sep)]
+                            temp = [x for x in temp if x]
+                            Database.append(temp)
+                except IOError:
+                    print("File Not Found")
+                    quit()
+        itemsets = {}  # {key: item, value: set of tids}
+        for line in Database:
+            tid = int(line[0])
+            self._tidSet.add(tid)
+            for item in line[1:]:
+                if item in itemsets:
+                    itemsets[item].add(tid)
+                else:
+                    itemsets[item] = {tid}
+
+        self._dbSize = len(Database)
+        self._lastTid = max(self._tidSet)
+        self._minSup = self._convert(self._minSup)
+        self._maxPer = self._convert(self._maxPer)
+        del Database
+        candidates = []
+        for item, tids in itemsets.items():
+            # the diffset holds the tids in which the item is absent
+            diff = self._tidSet.difference(tids)
+            per = self._getPeriodic(diff)
+            sup = len(tids)
+            if sup >= self._minSup and per <= self._maxPer:
+                candidates.append(item)
+                self._finalPatterns[item] = [sup, per, diff]
+        return candidates
+
+    def _generateDiffsetEclat(self, candidates: list) -> None:
+        new_freqList = []
+        for i in range(0, len(candidates)):
+            item1 = candidates[i]
+            i1_list = item1.split()
+            for j in range(i + 1, len(candidates)):
+                item2 = candidates[j]
+                i2_list = item2.split()
+                if i1_list[:-1] == i2_list[:-1]:
+                    # the support of the join is the database size minus the
+                    # number of tids in which either prefix is absent
+                    union_DiffSet = self._finalPatterns[item2][2].union(self._finalPatterns[item1][2])
+                    union_supp = self._dbSize - len(union_DiffSet)
+                    period = self._getPeriodic(union_DiffSet)
+                    if union_supp >= self._minSup and period <= self._maxPer:
+                        newKey = item1 + "\t" + i2_list[-1]
+                        self._finalPatterns[newKey] = [union_supp, period, union_DiffSet]
+                        new_freqList.append(newKey)
+                else:
+                    break
+
+        if len(new_freqList) > 0:
+            self._generateDiffsetEclat(new_freqList)
+
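+    # Diffset sketch (hypothetical numbers): with _dbSize = 10, an item whose
+    # diffset is {2, 5} (the tids where it is absent) has support 10 - 2 = 8.
+    # Joining two candidates unions their diffsets, so the union of absences
+    # equals the complement of the tid intersection, and the support of a
+    # longer pattern can never exceed that of its prefixes.
+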
+[docs]
+    def startMine(self) -> None:
+        """
+        Mining process will start from this function
+        :return: None
+        """
+        self._startTime = _ab._time.time()
+        self._finalPatterns = {}
+        frequentSets = self._creatingOneItemSets()
+        self._generateDiffsetEclat(frequentSets)
+        self._endTime = _ab._time.time()
+        process = _ab._psutil.Process(_ab._os.getpid())
+        self._memoryUSS = process.memory_full_info().uss
+        self._memoryRSS = process.memory_info().rss
+        print("Periodic-Frequent patterns were generated successfully using the PFPMC algorithm")
+ + +
+[docs] + def getMemoryUSS(self) -> float: + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self) -> float: + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self) -> float: + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self) -> _ab._pd.DataFrame: + """ + Storing final periodic-frequent patterns in a dataframe + + :return: returning periodic-frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataframe = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a, b[0], b[1]]) + dataframe = _ab._pd.DataFrame(data, columns=['Patterns', 'Support', 'Periodicity']) + return dataframe
+ + +
+[docs]
+    def save(self, outFile: str) -> None:
+        """
+        Complete set of periodic-frequent patterns will be written to an output file
+
+        :param outFile: name of the output file
+        :type outFile: csv file
+        :return: None
+        """
+        self._oFile = outFile
+        with open(self._oFile, 'w+') as writer:
+            for x, y in self._finalPatterns.items():
+                s1 = x + ":" + str(y[0]) + ":" + str(y[1])
+                writer.write("%s\n" % s1)
+ + +
+[docs] + def getPatterns(self) -> dict: + """ + Function to send the set of periodic-frequent patterns after completion of the mining process + + :return: returning periodic-frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs]
+    def printResults(self) -> None:
+        """
+        This function is used to print the results
+        :return: None
+        """
+        print("Total number of Periodic Frequent Patterns:", len(self.getPatterns()))
+        print("Total Memory in USS:", self.getMemoryUSS())
+        print("Total Memory in RSS:", self.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_ab._sys.argv) == 5 or len(_ab._sys.argv) == 6:
+        if len(_ab._sys.argv) == 6:
+            _ap = PFPMC(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5])
+        if len(_ab._sys.argv) == 5:
+            _ap = PFPMC(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4])
+        _ap.startMine()
+        print("Total number of Periodic-Frequent Patterns:", len(_ap.getPatterns()))
+        _ap.save(_ab._sys.argv[2])
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS:", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        print("Error! The number of input parameters does not match the total number of parameters provided")
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/periodicFrequentPattern/basic/PSGrowth.html b/sphinx/_build/html/_modules/PAMI/periodicFrequentPattern/basic/PSGrowth.html new file mode 100644 index 000000000..26cac50a3 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/periodicFrequentPattern/basic/PSGrowth.html @@ -0,0 +1,1127 @@ + + + + + + PAMI.periodicFrequentPattern.basic.PSGrowth — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +

Source code for PAMI.periodicFrequentPattern.basic.PSGrowth

+#  PS-Growth is one of the fundamental algorithms to discover periodic-frequent patterns in a temporal database.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#
+#             from PAMI.periodicFrequentPattern.basic import PSGrowth as alg
+#
+#             obj = alg.PSGrowth("../basic/sampleTDB.txt", "2", "6")
+#
+#             obj.startMine()
+#
+#             periodicFrequentPatterns = obj.getPatterns()
+#
+#             print("Total number of  Patterns:", len(periodicFrequentPatterns))
+#
+#             obj.save("patterns")
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+__copyright__ = """
+ Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+from PAMI.periodicFrequentPattern.basic import abstract as _ab
+from deprecated import deprecated
+from itertools import combinations as _combinations
+from typing import List, Dict, Tuple, Set, Union, Any, Generator
+
+_pfList = []
+_minSup = int()
+_maxPer = int()
+_lno = int()
+
+
+class _Interval(object):
+    """
+    To represent the timestamp interval of a node in summaries
+    """
+
+    def __init__(self, start, end, per, sup) -> None:
+        self.start = start
+        self.end = end
+        self.per = per
+        self.sup = sup
+
+
+class _NodeSummaries(object):
+    """
+    To define the summaries of timeStamps of a node
+
+    :Attributes:
+
+        totalSummaries : list
+            stores the summaries of timestamps
+
+    :Methods:
+
+        insert(timeStamps)
+            inserting and merging the timestamps into the summaries of a node
+    """
+
+    def __init__(self) -> None:
+        self.totalSummaries = []
+
+    def insert(self, tid) -> List[_Interval]:
+        """ To insert and merge the timeStamps into summaries of a node
+            :param tid: timeStamps of a node
+            :return: summaries of a node
+        """
+        k = self.totalSummaries[-1]
+        diff = tid - k.end
+        if diff <= _maxPer:
+            k.end = tid
+            k.per = max(diff, k.per)
+            k.sup += 1
+        else:
+            self.totalSummaries.append(_Interval(tid, tid, 0, 1))
+        return self.totalSummaries
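+    # Example (assuming _maxPer = 2): if totalSummaries ends with the interval
+    # (start=3, end=5, per=1, sup=2), insert(6) extends it to (3, 6, 1, 3)
+    # because the gap 6 - 5 <= _maxPer, whereas insert(9) from the same state
+    # would open a fresh interval (9, 9, 0, 1) since the gap 4 > _maxPer.
+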
+
+
+def _merge(summariesX, summariesY) -> List[_Interval]:
+    """
+    To Merge the timeStamps
+
+    :param summariesX: timeStamps of one itemSet
+    :param summariesY: timeStamps of the other itemSet
+    :return: merged timeStamps of both itemSets
+    """
+    iter1 = 0
+    iter2 = 0
+    updatedSummaries = []
+    l1 = len(summariesX)
+    l2 = len(summariesY)
+    while 1:
+        if summariesX[iter1].start < summariesY[iter2].start:
+            if summariesX[iter1].end < summariesY[iter2].start:
+                diff = summariesY[iter2].start - summariesX[iter1].end
+                if diff > _maxPer:
+                    updatedSummaries.append(_Interval(summariesX[iter1].start,
+                                                     summariesX[iter1].end, summariesX[iter1].per,
+                                                     summariesX[iter1].sup))
+                    iter1 += 1
+                    if iter1 >= l1:
+                        ck = 1
+                        break
+                else:
+                    per1 = max(diff, summariesX[iter1].per)
+                    per1 = max(per1, summariesY[iter2].per)
+                    updatedSummaries.append(
+                        _Interval(summariesX[iter1].start, summariesY[iter2].end, per1,
+                                 summariesX[iter1].sup + summariesY[iter2].sup))
+                    iter1 += 1
+                    iter2 += 1
+                    if iter1 >= l1:
+                        ck = 1
+                        break
+
+                    if iter2 >= l2:
+                        ck = 2
+                        break
+
+            else:
+                if summariesX[iter1].end > summariesY[iter2].end:
+                    updatedSummaries.append(_Interval(summariesX[iter1].start, summariesX[iter1].end,
+                                                     summariesX[iter1].per,
+                                                     summariesX[iter1].sup + summariesY[iter2].sup))
+                else:
+                    per1 = max(summariesX[iter1].per, summariesY[iter2].per)
+                    updatedSummaries.append(
+                        _Interval(summariesX[iter1].start, summariesY[iter2].end, per1,
+                                 summariesX[iter1].sup + summariesY[iter2].sup))
+                iter1 += 1
+                iter2 += 1
+                if iter1 >= l1:
+                    ck = 1
+                    break
+
+                if iter2 >= l2:
+                    ck = 2
+                    break
+        else:
+            if summariesY[iter2].end < summariesX[iter1].start:
+                diff = summariesX[iter1].start - summariesY[iter2].end
+                if diff > _maxPer:
+                    updatedSummaries.append(_Interval(summariesY[iter2].start, summariesY[iter2].end,
+                                                     summariesY[iter2].per, summariesY[iter2].sup))
+                    iter2 += 1
+                    if iter2 >= l2:
+                        ck = 2
+                        break
+                else:
+                    per1 = max(diff, summariesY[iter2].per)
+                    per1 = max(per1, summariesX[iter1].per)
+                    updatedSummaries.append(
+                        _Interval(summariesY[iter2].start, summariesX[iter1].end, per1,
+                                 summariesY[iter2].sup + summariesX[iter1].sup))
+                    iter2 += 1
+                    iter1 += 1
+                    if iter2 >= l2:
+                        ck = 2
+                        break
+
+                    if iter1 >= l1:
+                        ck = 1
+                        break
+
+            else:
+                if summariesY[iter2].end > summariesX[iter1].end:
+                    updatedSummaries.append(_Interval(summariesY[iter2].start, summariesY[iter2].end,
+                                                     summariesY[iter2].per,
+                                                     summariesY[iter2].sup + summariesX[iter1].sup))
+                else:
+                    per1 = max(summariesY[iter2].per, summariesX[iter1].per)
+                    updatedSummaries.append(
+                        _Interval(summariesY[iter2].start, summariesX[iter1].end, per1,
+                                 summariesY[iter2].sup + summariesX[iter1].sup))
+                iter2 += 1
+                iter1 += 1
+                if iter2 >= l2:
+                    ck = 2
+                    break
+
+                if iter1 >= l1:
+                    ck = 1
+                    break
+    if ck == 1:
+        while iter2 < l2:
+            updatedSummaries.append(summariesY[iter2])
+            iter2 += 1
+    else:
+        while iter1 < l1:
+            updatedSummaries.append(summariesX[iter1])
+            iter1 += 1
+    updatedSummaries = _update(updatedSummaries)
+
+    return updatedSummaries
+
+
+def _update(updatedSummaries) -> List[_Interval]:
+    """ After updating the summaries with first, last, and period elements in summaries
+
+    :param updatedSummaries: summaries that have been merged
+    :return: updated summaries of a node
+    """
+    summaries = [updatedSummaries[0]]
+    cur = updatedSummaries[0]
+    for i in range(1, len(updatedSummaries)):
+        v = (updatedSummaries[i].start - cur.end)
+        if cur.end > updatedSummaries[i].start or v <= _maxPer:
+            cur.end = max(updatedSummaries[i].end, cur.end)
+            cur.sup += updatedSummaries[i].sup
+            cur.per = max(cur.per, updatedSummaries[i].per)
+            cur.per = max(cur.per, v)
+        else:
+            summaries.append(updatedSummaries[i])
+        cur = summaries[-1]
+    return summaries
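+
+# Merge sketch: _merge interleaves two sorted interval lists, fusing
+# neighbouring intervals whose gap does not exceed _maxPer and summing their
+# supports; _update then makes one more pass so that, e.g. with _maxPer = 2,
+# the intervals (1, 4) and (5, 8) collapse into a single interval (1, 8)
+# whose period is the largest gap observed inside it.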
+
+
+
+[docs] +class Node(object): + """ + A class used to represent the node of frequentPatternTree + + :Attributes: + + item : int + storing item of a node + timeStamps : list + To maintain the timeStamps of Database at the end of the branch + parent : node + To maintain the parent of every node + children : list + To maintain the children of node + + :Methods: + + addChild(itemName) + storing the children to their respective parent nodes + """ + + def __init__(self, item, children) -> None: + """ + Initializing the Node class + + :param item: Storing the item of a node + :type item: int + :param children: To maintain the children of a node + :type children: dict + :return: None + """ + self.item = item + self.children = children + self.parent = None + self.timeStamps = _NodeSummaries() + +
+
+    def addChild(self, node) -> None:
+        """
+        Appends the children node details to a parent node
+
+        :param node: children node
+        :return: appending children node to parent node
+        """
+        self.children[node.item] = node
+        node.parent = self
+
+
+class _Tree(object):
+    """
+    A class used to represent the frequentPatternGrowth tree structure
+
+    :Attributes:
+
+        root : Node or None
+            Represents the root node of the tree
+        summaries : dictionary
+            storing the nodes with same item name
+        info : dictionary
+            stores the support of items
+
+    :Methods:
+
+        addTransaction(Database)
+            creating Database as a branch in frequentPatternTree
+        addConditionalTransactions(prefixPaths, supportOfItems)
+            construct the conditional tree for prefix paths
+        getConditionalPatterns(Node)
+            generates the conditional patterns from tree for specific node
+        conditionalTransaction(prefixPaths, Support)
+            takes the prefixPath of a node and support at child of the path and extracts the frequent items from
+            prefixPaths and generates prefixPaths with items which are frequent
+        remove(Node)
+            removes the node from tree once after generating all the patterns respective to the node
+        generatePatterns(Node)
+            starts from the root node of the tree and mines the periodic-frequent patterns
+    """
+
+    def __init__(self) -> None:
+        self.root = Node(None, {})
+        self.summaries = {}
+        self.info = {}
+
+    def addTransaction(self, transaction, tid) -> None:
+        """
+        Adding transaction into the tree
+
+        :param transaction: it represents the one transaction in a database
+        :type transaction: list
+        :param tid: represents the timestamp of a transaction
+        :type tid: list
+        :return: None
+        """
+        currentNode = self.root
+        for i in range(len(transaction)):
+            if transaction[i] not in currentNode.children:
+                newNode = Node(transaction[i], {})
+                currentNode.addChild(newNode)
+                if transaction[i] in self.summaries:
+                    self.summaries[transaction[i]].append(newNode)
+                else:
+                    self.summaries[transaction[i]] = [newNode]
+                currentNode = newNode
+            else:
+                currentNode = currentNode.children[transaction[i]]
+        if len(currentNode.timeStamps.totalSummaries) != 0:
+            currentNode.timeStamps.insert(tid)
+        else:
+            currentNode.timeStamps.totalSummaries.append(_Interval(tid, tid, 0, 1))
+
+    def addConditionalPatterns(self, transaction, tid) -> None:
+        """
+        To add the conditional transactions in to conditional tree
+
+        :param transaction: conditional transaction list of a node
+        :param tid: timestamp of a conditional transaction
+        :return: the conditional tree of a node
+        """
+        currentNode = self.root
+        for i in range(len(transaction)):
+            if transaction[i] not in currentNode.children:
+                newNode = Node(transaction[i], {})
+                currentNode.addChild(newNode)
+                if transaction[i] in self.summaries:
+                    self.summaries[transaction[i]].append(newNode)
+                else:
+                    self.summaries[transaction[i]] = [newNode]
+                currentNode = newNode
+            else:
+                currentNode = currentNode.children[transaction[i]]
+        if len(currentNode.timeStamps.totalSummaries) != 0:
+            currentNode.timeStamps.totalSummaries = _merge(currentNode.timeStamps.totalSummaries, tid)
+        else:
+            currentNode.timeStamps.totalSummaries = tid
+
+    def getConditionalPatterns(self, alpha) -> Tuple[List[List[int]], List[List[_Interval]], Dict[int, Tuple[int, int]]]:
+        """
+        To mine the conditional patterns of a node
+
+        :param alpha: starts from the leaf node of a tree
+        :return: the conditional patterns of a node
+        """
+        finalPatterns = []
+        finalSets = []
+        for i in self.summaries[alpha]:
+            set1 = i.timeStamps.totalSummaries
+            set2 = []
+            while i.parent.item is not None:
+                set2.append(i.parent.item)
+                i = i.parent
+            if len(set2) > 0:
+                set2.reverse()
+                finalPatterns.append(set2)
+                finalSets.append(set1)
+        finalPatterns, finalSets, info = conditionalTransactions(finalPatterns, finalSets)
+        return finalPatterns, finalSets, info
+
+    def removeNode(self, nodeValue) -> None:
+        """
+        to remove the node from the tree by pushing the timeStamps of leaf node to the parent node
+
+        :param nodeValue: name of node to be deleted
+        :return: removes the node from the tree
+        """
+        for i in self.summaries[nodeValue]:
+            if len(i.parent.timeStamps.totalSummaries) != 0:
+                i.parent.timeStamps.totalSummaries = _merge(i.parent.timeStamps.totalSummaries,
+                                                            i.timeStamps.totalSummaries)
+            else:
+                i.parent.timeStamps.totalSummaries = i.timeStamps.totalSummaries
+            del i.parent.children[nodeValue]
+            del i
+        del self.summaries[nodeValue]
+
+    def getTimeStamps(self, alpha) -> List[_Interval]:
+        """
+        To get the timeStamps of a respective node
+
+        :param alpha: name of node for the timeStamp
+        :return: timeStamps of a node
+        """
+        temp = []
+        for i in self.summaries[alpha]:
+            temp += i.timeStamps
+        return temp
+
+    def check(self) -> int:
+        """
+        To check whether the tree consists of a single branch and whether any internal node carries summaries
+
+        :return: 1 if the tree has multiple branches or an internal node with timeStamps, -1 otherwise
+        """
+        k = self.root
+        while len(k.children) != 0:
+            if len(k.children) > 1:
+                return 1
+            if len(k.children) != 0 and len(k.timeStamps.totalSummaries) > 0:
+                return 1
+            for j in k.children:
+                v = k.children[j]
+                k = v
+        return -1
+
+    def generatePatterns(self, prefix) -> None:
+        """
+        Generating the patterns from the tree
+
+        :param prefix: empty list to form the combinations
+        :return: returning the periodic-frequent patterns from the tree
+        """
+        global _pfList
+        for i in sorted(self.summaries, key=lambda x: (self.info.get(x)[0], -x)):
+            pattern = prefix[:]
+            pattern.append(_pfList[i])
+            yield pattern, self.info[i]
+            patterns, timeStamps, info = self.getConditionalPatterns(i)
+            conditionalTree = _Tree()
+            conditionalTree.info = info.copy()
+            for pat in range(len(patterns)):
+                conditionalTree.addConditionalPatterns(patterns[pat], timeStamps[pat])
+            find = conditionalTree.check()
+            if find == 1:
+                del patterns, timeStamps, info
+                for cp in conditionalTree.generatePatterns(pattern):
+                    yield cp
+            else:
+                if len(conditionalTree.info) != 0:
+                    j = []
+                    for r in timeStamps:
+                        j += r
+                    inf = getPeriodAndSupport(j)
+                    patterns[0].reverse()
+                    upp = []
+                    for jm in patterns[0]:
+                        upp.append(_pfList[jm])
+                    allSubsets = _subLists(upp)
+                    for pa in allSubsets:
+                        yield pattern + pa, inf
+                del patterns, timeStamps, info
+            del conditionalTree
+            self.removeNode(i)
+
+
+def _subLists(itemSet) -> List[List[int]]:
+    """
+    Forms all the subsets of given itemSet
+
+    :param itemSet: itemSet or a list of periodic-frequent items
+    :return: subsets of itemSet
+    """
+    subs = []
+    for i in range(1, len(itemSet) + 1):
+        temp = [list(x) for x in _combinations(itemSet, i)]
+        if len(temp) > 0:
+            subs.extend(temp)
+    return subs
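+
+# A quick illustration of _subLists (assuming _combinations aliases
+# itertools.combinations, as its usage above suggests):
+#
+# >>> _subLists(['a', 'b'])
+# [['a'], ['b'], ['a', 'b']]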
+
+
+def getPeriodAndSupport(timeStamps) -> List[int]:
+    """
+    Calculates the period and support of a list of timeStamps
+
+    :param timeStamps: timeStamps of a pattern or item
+    :return: support and periodicity
+    """
+    cur = 0
+    per = 0
+    sup = 0
+    for j in range(len(timeStamps)):
+        per = max(per, timeStamps[j].start - cur)
+        per = max(per, timeStamps[j].per)
+        if per > _maxPer:
+            return [0, 0]
+        cur = timeStamps[j].end
+        sup += timeStamps[j].sup
+    per = max(per, _lno - cur)
+    return [sup, per]
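+
+# A worked example (illustrative, assuming _maxPer = 3 and _lno = 10): two
+# interval summaries covering timestamps 1-2 (sup 2) and 4-6 (sup 3, internal
+# period 2) yield
+#
+# >>> getPeriodAndSupport([_Interval(1, 2, 1, 2), _Interval(4, 6, 2, 3)])
+# [5, 4]
+#
+# support 5 = 2 + 3, and periodicity 4 = max(1, 1, 2, 2, 10 - 6), the largest
+# of the leading gap, the internal periods, the inter-interval gap, and the
+# tail gap to the last transaction.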
+
+
+def conditionalTransactions(patterns, timestamp) -> Tuple[List[List[int]], List[List[_Interval]], Dict[int, Tuple[int, int]]]:
+    """
+    To sort and update the conditional transactions by removing the items which fail the frequency
+    and periodicity conditions
+
+    :param patterns: conditional patterns of a node
+    :param timestamp: timeStamps of a conditional pattern
+    :return: conditional transactions with their respective timeStamps
+    """
+    global _minSup, _maxPer
+    pat = []
+    timeStamps = []
+    data1 = {}
+    for i in range(len(patterns)):
+        for j in patterns[i]:
+            if j in data1:
+                data1[j] = _merge(data1[j], timestamp[i])
+            else:
+                data1[j] = timestamp[i]
+    updatedDict = {}
+    for m in data1:
+        updatedDict[m] = getPeriodAndSupport(data1[m])
+    updatedDict = {k: v for k, v in updatedDict.items() if v[0] >= _minSup and v[1] <= _maxPer}
+    count = 0
+    for p in patterns:
+        p1 = [v for v in p if v in updatedDict]
+        trans = sorted(p1, key=lambda x: (updatedDict.get(x)[0], -x), reverse=True)
+        if len(trans) > 0:
+            pat.append(trans)
+            timeStamps.append(timestamp[count])
+        count += 1
+    return pat, timeStamps, updatedDict
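+
+# Illustrative sketch of the ordering used above (toy values): items are sorted
+# by descending support, with ties broken by ascending item id because the item
+# is negated inside the key.
+#
+# >>> updatedDict = {3: [5, 2], 7: [5, 2], 1: [2, 4]}
+# >>> sorted([1, 3, 7], key=lambda x: (updatedDict.get(x)[0], -x), reverse=True)
+# [3, 7, 1]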
+
+
+class PSGrowth(_ab._periodicFrequentPatterns):
+    """
+    :Description: PS-Growth is one of the fundamental algorithms to discover periodic-frequent patterns in a temporal database.
+
+    :Reference: A. Anirudh, R. U. Kiran, P. K. Reddy and M. Kitsuregawa, "Memory efficient mining of periodic-frequent
+                patterns in transactional databases," 2016 IEEE Symposium Series on Computational Intelligence (SSCI),
+                2016, pp. 1-8, https://doi.org/10.1109/SSCI.2016.7849926
+
+    :param iFile: str :
+        Name of the Input file to mine the complete set of periodic-frequent patterns
+    :param oFile: str :
+        Name of the output file to store the complete set of periodic-frequent patterns
+    :param minSup: str :
+        Controls the minimum number of transactions in which every item must appear in a database.
+    :param maxPer: str :
+        Controls the maximum number of transactions in which any two items within a pattern can reappear.
+    :param sep: str :
+        This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator.
+
+    :Attributes:
+
+        iFile : file
+            Name of the Input file or path of the input file
+        oFile : file
+            Name of the output file or path of the output file
+        minSup : int or float or str
+            The user can specify minSup either in count or proportion of database size.
+            If the program detects the data type of minSup is integer, then it treats minSup as expressed in count.
+            Otherwise, it will be treated as float.
+            Example: minSup=10 will be treated as integer, while minSup=10.0 will be treated as float
+        maxPer : int or float or str
+            The user can specify maxPer either in count or proportion of database size.
+            If the program detects the data type of maxPer is integer, then it treats maxPer as expressed in count.
+            Otherwise, it will be treated as float.
+            Example: maxPer=10 will be treated as integer, while maxPer=10.0 will be treated as float
+        sep : str
+            This variable is used to distinguish items from one another in a transaction. The default separator is tab space or \t.
+            However, the users can override the default separator.
+        memoryUSS : float
+            To store the total amount of USS memory consumed by the program
+        memoryRSS : float
+            To store the total amount of RSS memory consumed by the program
+        startTime : float
+            To record the start time of the mining process
+        endTime : float
+            To record the completion time of the mining process
+        Database : list
+            To store the transactions of a database in list
+        mapSupport : Dictionary
+            To maintain the information of items and their frequency
+        lno : int
+            it represents the total number of transactions
+        tree : class
+            it represents the Tree class
+        itemSetCount : int
+            it represents the total number of patterns
+        finalPatterns : dict
+            it represents to store the patterns
+
+    :Methods:
+
+        startMine()
+            Mining process will start from here
+        getPatterns()
+            Complete set of patterns will be retrieved with this function
+        save(oFile)
+            Complete set of periodic-frequent patterns will be loaded in to an output file
+        getConditionalPatternsInDataFrame()
+            Complete set of periodic-frequent patterns will be loaded in to a dataframe
+        getMemoryUSS()
+            Total amount of USS memory consumed by the mining process will be retrieved from this function
+        getMemoryRSS()
+            Total amount of RSS memory consumed by the mining process will be retrieved from this function
+        getRuntime()
+            Total amount of runtime taken by the mining process will be retrieved from this function
+        OneLengthItems()
+            Scans the dataset or dataframes and stores them in list format
+        buildTree()
+            after updating, the Databases are added into the tree by setting the root node as null
+
+    **Methods to execute code on terminal**
+    -----------------------------------------
+    .. code-block:: console
+
+      Format:
+
+      (.venv) $ python3 PSGrowth.py <inputFile> <outputFile> <minSup> <maxPer>
+
+      Example:
+
+      (.venv) $ python3 PSGrowth.py sampleTDB.txt patterns.txt 0.3 0.4
+
+    .. note:: minSup will be considered in percentage of database transactions
+
+    **Importing this algorithm into a python program**
+    ----------------------------------------------------
+    .. code-block:: python
+
+        from PAMI.periodicFrequentPattern.basic import PSGrowth as alg
+
+        obj = alg.PSGrowth("../basic/sampleTDB.txt", "2", "6")
+
+        obj.startMine()
+
+        periodicFrequentPatterns = obj.getPatterns()
+
+        print("Total number of Patterns:", len(periodicFrequentPatterns))
+
+        obj.save("patterns")
+
+        Df = obj.getPatternsAsDataFrame()
+
+        memUSS = obj.getMemoryUSS()
+
+        print("Total Memory in USS:", memUSS)
+
+        memRSS = obj.getMemoryRSS()
+
+        print("Total Memory in RSS", memRSS)
+
+        run = obj.getRuntime()
+
+        print("Total ExecutionTime in seconds:", run)
+
+    **Credits:**
+    --------------
+    The complete program was written by P. Likhitha under the supervision of Professor Rage Uday Kiran.
+    """
+
+    _startTime = float()
+    _endTime = float()
+    _minSup = str()
+    _maxPer = str()
+    _finalPatterns = {}
+    _iFile = " "
+    _oFile = " "
+    _sep = " "
+    _memoryUSS = float()
+    _memoryRSS = float()
+    _Database = []
+    _rank = {}
+    _lno = 0
+
+    def _convert(self, value) -> float:
+        """
+        To convert the given user specified value
+
+        :param value: user specified value
+        :return: converted value
+        """
+        if type(value) is int:
+            value = int(value)
+        if type(value) is float:
+            value = (len(self._Database) * value)
+        if type(value) is str:
+            if '.' in value:
+                value = float(value)
+                value = (len(self._Database) * value)
+            else:
+                value = int(value)
+        return value
+
+    def _creatingItemSets(self) -> None:
+        """
+        Storing the complete transactions of the database/input file in a database variable
+
+        :return: None
+        """
+        self._Database = []
+        if isinstance(self._iFile, _ab._pd.DataFrame):
+            ts, data = [], []
+            if self._iFile.empty:
+                print("its empty..")
+            i = self._iFile.columns.values.tolist()
+            if 'TS' in i:
+                ts = self._iFile['TS'].tolist()
+            if 'Transactions' in i:
+                data = self._iFile['Transactions'].tolist()
+            for i in range(len(data)):
+                tr = [ts[i][0]]
+                tr = tr + data[i]
+                self._Database.append(tr)
+        if isinstance(self._iFile, str):
+            if _ab._validators.url(self._iFile):
+                data = _ab._urlopen(self._iFile)
+                for line in data:
+                    line = line.decode("utf-8").strip()
+                    temp = [i.rstrip() for i in line.split(self._sep)]
+                    temp = [x for x in temp if x]
+                    self._Database.append(temp)
+            else:
+                try:
+                    with open(self._iFile, 'r', encoding='utf-8') as f:
+                        for line in f:
+                            line = line.strip()
+                            temp = [i.rstrip() for i in line.split(self._sep)]
+                            temp = [x for x in temp if x]
+                            self._Database.append(temp)
+                except IOError:
+                    print("File Not Found")
+                    quit()
+
+    def _OneLengthItems(self):
+        """
+        Scans the database and extracts the one-length periodic-frequent items
+        """
+        data = {}
+        global _minSup, _maxPer, _lno
+        for tr in self._Database:
+            self._lno += 1
+            for i in range(1, len(tr)):
+                if tr[i] not in data:
+                    data[tr[i]] = [int(tr[0]), int(tr[0]), 1]
+                else:
+                    data[tr[i]][0] = max(data[tr[i]][0], (int(tr[0]) - data[tr[i]][1]))
+                    data[tr[i]][1] = int(tr[0])
+                    data[tr[i]][2] += 1
+        for key in data:
+            data[key][0] = max(data[key][0], self._lno - data[key][1])
+        self._minSup = self._convert(self._minSup)
+        self._maxPer = self._convert(self._maxPer)
+        _minSup, _maxPer, _lno = self._minSup, self._maxPer, self._lno
+        data = {k: [v[2], v[0]] for k, v in data.items() if v[0] <= self._maxPer and v[2] >= self._minSup}
+        genList = [k for k, v in sorted(data.items(), key=lambda x: (x[1][0], x[0]), reverse=True)]
+        self._rank = dict([(index, item) for (item, index) in enumerate(genList)])
+        return data, genList
+
+    def _buildTree(self, info, sampleDict) -> _Tree:
+        """
+        it takes the Databases and the support of each item and constructs the main tree by setting the root node as null
+
+        :param info: it represents the support of each item
+        :type info: dictionary
+        :param sampleDict: One-length periodic-frequent patterns in a dictionary
+        :type sampleDict: dict
+        :return: Returns the root node of the tree
+        """
+        rootNode = _Tree()
+        rootNode.info = info.copy()
+        k = 0
+        for line in self._Database:
+            k += 1
+            tr = line
+            list2 = [int(tr[0])]
+            for i in range(1, len(tr)):
+                if tr[i] in sampleDict:
+                    list2.append(self._rank[tr[i]])
+            if len(list2) >= 2:
+                basket = list2[1:]
+                basket.sort()
+                list2[1:] = basket[0:]
+                rootNode.addTransaction(list2[1:], list2[0])
+        return rootNode
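+
+    # A worked example of _convert (illustrative, assuming a database of 100
+    # transactions):
+    #
+    #     self._convert(10)     # -> 10    (an int is an absolute count)
+    #     self._convert(0.1)    # -> 10.0  (a float is a proportion of |database|)
+    #     self._convert("0.1")  # -> 10.0  (a string containing '.' is a proportion)
+    #     self._convert("10")   # -> 10    (a string without '.' is a count)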
+
+    @deprecated("It is recommended to use mine() instead of startMine() for mining process")
+    def startMine(self) -> None:
+        """
+        Mining process will start from this function
+
+        :return: None
+        """
+        global _minSup, _maxPer, _lno, _pfList
+        self._startTime = _ab._time.time()
+        if self._iFile is None:
+            raise Exception("Please enter the file path or file name:")
+        if self._minSup is None:
+            raise Exception("Please enter the Minimum Support")
+        self._creatingItemSets()
+        OneLengthPeriodicItems, _pfList = self._OneLengthItems()
+        info = {self._rank[k]: v for k, v in OneLengthPeriodicItems.items()}
+        Tree = self._buildTree(info, OneLengthPeriodicItems)
+        patterns = Tree.generatePatterns([])
+        self._finalPatterns = {}
+        for i in patterns:
+            sample = str()
+            for k in i[0]:
+                sample = sample + k + "\t"
+            self._finalPatterns[sample] = i[1]
+        self._endTime = _ab._time.time()
+        process = _ab._psutil.Process(_ab._os.getpid())
+        self._memoryUSS = process.memory_full_info().uss
+        self._memoryRSS = process.memory_info().rss
+        print("Periodic-Frequent patterns were generated successfully using PS-Growth algorithm")
+
+    def Mine(self) -> None:
+        """
+        Mining process will start from this function
+
+        :return: None
+        """
+        global _minSup, _maxPer, _lno, _pfList
+        self._startTime = _ab._time.time()
+        if self._iFile is None:
+            raise Exception("Please enter the file path or file name:")
+        if self._minSup is None:
+            raise Exception("Please enter the Minimum Support")
+        self._creatingItemSets()
+        OneLengthPeriodicItems, _pfList = self._OneLengthItems()
+        info = {self._rank[k]: v for k, v in OneLengthPeriodicItems.items()}
+        Tree = self._buildTree(info, OneLengthPeriodicItems)
+        patterns = Tree.generatePatterns([])
+        self._finalPatterns = {}
+        for i in patterns:
+            sample = str()
+            for k in i[0]:
+                sample = sample + k + "\t"
+            self._finalPatterns[sample] = i[1]
+        self._endTime = _ab._time.time()
+        process = _ab._psutil.Process(_ab._os.getpid())
+        self._memoryUSS = process.memory_full_info().uss
+        self._memoryRSS = process.memory_info().rss
+        print("Periodic-Frequent patterns were generated successfully using PS-Growth algorithm")
+
+    def getMemoryUSS(self) -> float:
+        """
+        Total amount of USS memory consumed by the mining process will be retrieved from this function
+
+        :return: returning USS memory consumed by the mining process
+        :rtype: float
+        """
+        return self._memoryUSS
+
+    def getMemoryRSS(self) -> float:
+        """
+        Total amount of RSS memory consumed by the mining process will be retrieved from this function
+
+        :return: returning RSS memory consumed by the mining process
+        :rtype: float
+        """
+        return self._memoryRSS
+
+    def getRuntime(self) -> float:
+        """
+        Calculating the total amount of runtime taken by the mining process
+
+        :return: returning total amount of runtime taken by the mining process
+        :rtype: float
+        """
+        return self._endTime - self._startTime
+
+    def getPatternsAsDataFrame(self) -> _ab._pd.DataFrame:
+        """
+        Storing final periodic-frequent patterns in a dataframe
+
+        :return: returning periodic-frequent patterns in a dataframe
+        :rtype: pd.DataFrame
+        """
+        data = []
+        for a, b in self._finalPatterns.items():
+            data.append([a, b[0], b[1]])
+        dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support', 'Periodicity'])
+        return dataFrame
+
+    def save(self, outFile: str) -> None:
+        """
+        Complete set of periodic-frequent patterns will be loaded in to an output file
+
+        :param outFile: name of the output file
+        :type outFile: csv file
+        :return: None
+        """
+        self._oFile = outFile
+        with open(self._oFile, 'w+') as writer:
+            for x, y in self._finalPatterns.items():
+                s1 = x + ":" + str(y[0]) + ":" + str(y[1])
+                writer.write("%s \n" % s1)
+
+    def getPatterns(self) -> dict:
+        """
+        Function to send the set of periodic-frequent patterns after completion of the mining process
+
+        :return: returning periodic-frequent patterns
+        :rtype: dict
+        """
+        return self._finalPatterns
+
+    def printResults(self) -> None:
+        """
+        This function is used to print the results
+
+        :return: None
+        """
+        print("Total number of Periodic Frequent Patterns:", len(self.getPatterns()))
+        print("Total Memory in USS:", self.getMemoryUSS())
+        print("Total Memory in RSS:", self.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_ab._sys.argv) == 5 or len(_ab._sys.argv) == 6:
+        if len(_ab._sys.argv) == 6:
+            _ap = PSGrowth(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5])
+        if len(_ab._sys.argv) == 5:
+            _ap = PSGrowth(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4])
+        _ap.startMine()
+        print("Total number of Periodic-Frequent Patterns:", len(_ap.getPatterns()))
+        _ap.save(_ab._sys.argv[2])
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS:", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        print("Error! The number of input parameters does not match the total number of parameters provided")
\ No newline at end of file
diff --git a/sphinx/_build/html/_modules/PAMI/periodicFrequentPattern/closed/CPFPMiner.html b/sphinx/_build/html/_modules/PAMI/periodicFrequentPattern/closed/CPFPMiner.html
new file mode 100644
index 000000000..ba116a922
--- /dev/null
+++ b/sphinx/_build/html/_modules/PAMI/periodicFrequentPattern/closed/CPFPMiner.html
@@ -0,0 +1,768 @@
+PAMI.periodicFrequentPattern.closed.CPFPMiner — PAMI 2024.04.23 documentation

Source code for PAMI.periodicFrequentPattern.closed.CPFPMiner

+#  CPFPMiner algorithm is used to discover the closed periodic frequent patterns in temporal databases.
+#  It uses depth-first search.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#
+#             from PAMI.periodicFrequentPattern.closed import CPFPMiner as alg
+#
+#             obj = alg.CPFPMiner("../basic/sampleTDB.txt", "2", "6")
+#
+#             obj.startMine()
+#
+#             periodicFrequentPatterns = obj.getPatterns()
+#
+#             print("Total number of Frequent Patterns:", len(periodicFrequentPatterns))
+#
+#             obj.save("patterns")
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+__copyright__ = """
+ Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+from PAMI.periodicFrequentPattern.closed import abstract as _ab
+from deprecated import deprecated
+
+
+
+
+class CPFPMiner(_ab._periodicFrequentPatterns):
+    """
+    About this algorithm
+    ====================
+
+    :Description: CPFPMiner algorithm is used to discover the closed periodic frequent patterns in temporal databases.
+                  It uses a depth-first search.
+
+    :Reference: P. Likhitha et al., "Discovering Closed Periodic-Frequent Patterns in Very Large Temporal Databases",
+                2020 IEEE International Conference on Big Data (Big Data), 2020, https://ieeexplore.ieee.org/document/9378215
+
+    :param iFile: str :
+        Name of the Input file to mine the complete set of periodic-frequent patterns
+    :param oFile: str :
+        Name of the output file to store the complete set of periodic-frequent patterns
+    :param minSup: float :
+        Controls the minimum number of transactions in which every item must appear in a database.
+    :param maxPer: float :
+        Controls the maximum number of transactions in which any two items within a pattern can reappear.
+    :param sep: str :
+        This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator.
+
+    :Attributes:
+
+        iFile : str
+            Input file name or path of the input file
+        oFile : str
+            Name of the output file or path of the output file
+        minSup : int or float or str
+            The user can specify minSup either in count or proportion of database size.
+            If the program detects the data type of minSup is integer, then it treats minSup as expressed in count.
+            Otherwise, it will be treated as float.
+            Example: minSup=10 will be treated as integer, while minSup=10.0 will be treated as float
+        maxPer : int or float or str
+            The user can specify maxPer either in count or proportion of database size.
+            If the program detects the data type of maxPer is integer, then it treats maxPer as expressed in count.
+            Otherwise, it will be treated as float.
+            Example: maxPer=10 will be treated as integer, while maxPer=10.0 will be treated as float
+        sep : str
+            This variable is used to distinguish items from one another in a transaction. The default separator is tab space or \t.
+            However, the users can override the default separator.
+        startTime : float
+            To record the start time of the mining process
+        endTime : float
+            To record the completion time of the mining process
+        finalPatterns : dict
+            Storing the complete set of patterns in a dictionary variable
+        memoryUSS : float
+            To store the total amount of USS memory consumed by the program
+        memoryRSS : float
+            To store the total amount of RSS memory consumed by the program
+
+    :Methods:
+
+        startMine()
+            Mining process will start from here
+        getPatterns()
+            Complete set of patterns will be retrieved with this function
+        save(oFile)
+            Complete set of frequent patterns will be loaded in to an output file
+        getPatternsAsDataFrame()
+            Complete set of frequent patterns will be loaded in to a dataframe
+        getMemoryUSS()
+            Total amount of USS memory consumed by the mining process will be retrieved from this function
+        getMemoryRSS()
+            Total amount of RSS memory consumed by the mining process will be retrieved from this function
+        getRuntime()
+            Total amount of runtime taken by the mining process will be retrieved from this function
+
+    Execution methods
+    =================
+
+    **Terminal command**
+
+    .. code-block:: console
+
+      Format:
+
+      (.venv) $ python3 CPFPMiner.py <inputFile> <outputFile> <minSup> <maxPer>
+
+      Example:
+
+      (.venv) $ python3 CPFPMiner.py sampleTDB.txt patterns.txt 0.3 0.4
+
+    .. note:: minSup will be considered in percentage of database transactions
+
+    **Calling from a python program**
+
+    .. code-block:: python
+
+        from PAMI.periodicFrequentPattern.closed import CPFPMiner as alg
+
+        obj = alg.CPFPMiner("../basic/sampleTDB.txt", "2", "6")
+
+        obj.startMine()
+
+        periodicFrequentPatterns = obj.getPatterns()
+
+        print("Total number of Frequent Patterns:", len(periodicFrequentPatterns))
+
+        obj.save("patterns")
+
+        Df = obj.getPatternsAsDataFrame()
+
+        memUSS = obj.getMemoryUSS()
+
+        print("Total Memory in USS:", memUSS)
+
+        memRSS = obj.getMemoryRSS()
+
+        print("Total Memory in RSS", memRSS)
+
+        run = obj.getRuntime()
+
+        print("Total ExecutionTime in seconds:", run)
+
+    **Credits:**
+    ------------------
+    The complete program was written by P. Likhitha under the supervision of Professor Rage Uday Kiran.
+    """
+
+    _minSup = float()
+    _maxPer = float()
+    _startTime = float()
+    _endTime = float()
+    _finalPatterns = {}
+    _iFile = " "
+    _oFile = " "
+    _sep = " "
+    _memoryUSS = float()
+    _memoryRSS = float()
+    _transaction = []
+    _hashing = {}
+    _mapSupport = {}
+    _itemSetCount = 0
+    _maxItemId = 0
+    _tableSize = 10000
+    _tidList = {}
+    _lno = 0
+
+    def __init__(self, iFile, minSup, maxPer, sep='\t'):
+        super().__init__(iFile, minSup, maxPer, sep)
+        self._finalPatterns = {}
+
+    def _convert(self, value):
+        """
+        To convert the given user specified value
+
+        :param value: user specified value
+        :return: converted value
+        """
+        if type(value) is int:
+            value = int(value)
+        if type(value) is float:
+            value = (self._lno * value)
+        if type(value) is str:
+            if '.' in value:
+                value = float(value)
+                value = (self._lno * value)
+            else:
+                value = int(value)
+        return value
+
+    def _scanDatabase(self):
+        """
+        To scan the database and extract the 1-length periodic-frequent items
+
+        :return: Returns the 1-length periodic-frequent items
+        """
+        Database = []
+        if isinstance(self._iFile, _ab._pd.DataFrame):
+            ts, data = [], []
+            if self._iFile.empty:
+                print("its empty..")
+            i = self._iFile.columns.values.tolist()
+            if 'TS' in i:
+                ts = self._iFile['TS'].tolist()
+            if 'Transactions' in i:
+                data = self._iFile['Transactions'].tolist()
+            for i in range(len(data)):
+                tr = [ts[i][0]]
+                tr = tr + data[i]
+                Database.append(tr)
+        if isinstance(self._iFile, str):
+            if _ab._validators.url(self._iFile):
+                data = _ab._urlopen(self._iFile)
+                for line in data:
+                    line = line.decode("utf-8").strip()
+                    temp = [i.rstrip() for i in line.split(self._sep)]
+                    temp = [x for x in temp if x]
+                    Database.append(temp)
+            else:
+                try:
+                    with open(self._iFile, 'r', encoding='utf-8') as f:
+                        for line in f:
+                            line = line.strip()
+                            temp = [i.rstrip() for i in line.split(self._sep)]
+                            temp = [x for x in temp if x]
+                            Database.append(temp)
+                except IOError:
+                    print("File Not Found")
+                    quit()
+        self._tidList = {}
+        self._mapSupport = {}
+        for line in Database:
+            self._lno += 1
+            s = line
+            n = int(s[0])
+            for i in range(1, len(s)):
+                si = s[i]
+                if self._mapSupport.get(si) is None:
+                    self._mapSupport[si] = [1, abs(0 - n), n]
+                    self._tidList[si] = [n]
+                else:
+                    self._mapSupport[si][0] += 1
+                    self._mapSupport[si][1] = max(self._mapSupport[si][1], abs(n - self._mapSupport[si][2]))
+                    self._mapSupport[si][2] = n
+                    self._tidList[si].append(n)
+        for x, y in self._mapSupport.items():
+            self._mapSupport[x][1] = max(self._mapSupport[x][1], abs(self._lno - self._mapSupport[x][2]))
+        self._minSup = self._convert(self._minSup)
+        self._maxPer = self._convert(self._maxPer)
+        self._mapSupport = {k: [v[0], v[1]] for k, v in self._mapSupport.items() if
+                            v[0] >= self._minSup and v[1] <= self._maxPer}
+        periodicFrequentItems = {}
+        self._tidList = {k: v for k, v in self._tidList.items() if k in self._mapSupport}
+        for x, y in self._tidList.items():
+            t1 = 0
+            for i in y:
+                t1 += i
+            periodicFrequentItems[x] = t1
+        periodicFrequentItems = [key for key, value in sorted(periodicFrequentItems.items(), key=lambda x: x[1])]
+        return periodicFrequentItems
+
+    def _calculate(self, tidSet):
+        """
+        To calculate the hashcode of a pattern based on its respective timeStamps
+
+        :param tidSet: timeStamps of the pattern
+        :return: the hashcode computed from the timeStamps
+        """
+        hashcode = 0
+        for i in tidSet:
+            hashcode += i
+        if hashcode < 0:
+            hashcode = abs(0 - hashcode)
+        return hashcode % self._tableSize
+
+    def _contains(self, itemSet, val, hashcode):
+        """
+        To check if the key (hashcode) is in the dictionary (hashing) variable
+
+        :param itemSet: generated periodic-frequent itemSet
+        :param val: support and periodicity of itemSet
+        :param hashcode: the key generated in the _calculate() method for every itemSet
+        :return: True if a superset of itemSet with the same support is present in hashing, otherwise False
+        """
+        if self._hashing.get(hashcode) is None:
+            return False
+        for i in self._hashing[hashcode]:
+            itemSetX = i
+            if val[0] == self._hashing[hashcode][itemSetX][0] and set(itemSetX).issuperset(itemSet):
+                return True
+        return False
+
+    def _getPeriodAndSupport(self, timeStamps):
+        """
+        Calculates the periodicity and support of timeStamps
+
+        :param timeStamps: timeStamps of itemSet
+        :return: periodicity and support
+        """
+        timeStamps.sort()
+        cur = 0
+        per = 0
+        sup = 0
+        for j in range(len(timeStamps)):
+            per = max(per, timeStamps[j] - cur)
+            if per > self._maxPer:
+                return [0, 0]
+            cur = timeStamps[j]
+            sup += 1
+        per = max(per, self._lno - cur)
+        return [sup, per]
+
+    def _save(self, prefix, suffix, tidSetX):
+        """
+        Saves the generated pattern if it satisfies the closed property
+
+        :param prefix: the prefix part of itemSet
+        :param suffix: the suffix part of itemSet
+        :param tidSetX: the timeStamps of the generated itemSet
+        :return: saves the closed periodic-frequent pattern
+        """
+        if prefix is None:
+            prefix = suffix
+        else:
+            prefix = prefix + suffix
+        prefix = list(set(prefix))
+        prefix.sort()
+        val = self._getPeriodAndSupport(tidSetX)
+        if val[0] >= self._minSup and val[1] <= self._maxPer:
+            hashcode = self._calculate(tidSetX)
+            if self._contains(prefix, val, hashcode) is False:
+                self._itemSetCount += 1
+                sample = str()
+                for i in prefix:
+                    sample = sample + i + " "
+                self._finalPatterns[sample] = val
+                if hashcode not in self._hashing:
+                    self._hashing[hashcode] = {tuple(prefix): val}
+                else:
+                    self._hashing[hashcode][tuple(prefix)] = val
+
+    def _processEquivalenceClass(self, prefix, itemSets, tidSets):
+        """
+        Identifies and saves closed periodic patterns of length greater than two by processing
+        equivalence classes of itemSets that satisfy the minimum support condition.
+
+        :param prefix: Prefix class of an itemSet
+        :param itemSets: suffix items in periodicFrequentItems that satisfy the minSup condition
+        :param tidSets: timeStamps of items in itemSets respectively
+        :return: closed periodic patterns with length greater than two
+        """
+        if len(itemSets) == 1:
+            i = itemSets[0]
+            tidList = tidSets[0]
+            self._save(prefix, [i], tidList)
+            return
+        if len(itemSets) == 2:
+            itemI = itemSets[0]
+            tidSetI = tidSets[0]
+            itemJ = itemSets[1]
+            tidSetJ = tidSets[1]
+            y1 = list(set(tidSetI).intersection(tidSetJ))
+            if len(y1) >= self._minSup:
+                suffix = []
+                suffix += [itemI, itemJ]
+                suffix = list(set(suffix))
+                self._save(prefix, suffix, y1)
+            if len(y1) != len(tidSetI):
+                self._save(prefix, [itemI], tidSetI)
+            if len(y1) != len(tidSetJ):
+                self._save(prefix, [itemJ], tidSetJ)
+            return
+        for i in range(len(itemSets)):
+            itemX = itemSets[i]
+            if itemX is None:
+                continue
+            tidSetX = tidSets[i]
+            classItemSets = []
+            classTidSets = []
+            itemSetX = [itemX]
+            for j in range(i + 1, len(itemSets)):
+                itemJ = itemSets[j]
+                if itemJ is None:
+                    continue
+                tidSetJ = tidSets[j]
+                y = list(set(tidSetX).intersection(tidSetJ))
+                if len(y) < self._minSup:
+                    continue
+                if len(tidSetX) == len(tidSetJ) and len(y) == len(tidSetX):
+                    itemSets.insert(j, None)
+                    tidSets.insert(j, None)
+                    itemSetX.append(itemJ)
+                elif len(tidSetX) < len(tidSetJ) and len(y) == len(tidSetX):
+                    itemSetX.append(itemJ)
+                elif len(tidSetX) > len(tidSetJ) and len(y) == len(tidSetJ):
+                    itemSets.insert(j, None)
+                    tidSets.insert(j, None)
+                    classItemSets.append(itemJ)
+                    classTidSets.append(y)
+                else:
+                    classItemSets.append(itemJ)
+                    classTidSets.append(y)
+            if len(classItemSets) > 0:
+                newPrefix = list(set(itemSetX)) + prefix
+                self._processEquivalenceClass(newPrefix, classItemSets, classTidSets)
+            self._save(prefix, list(set(itemSetX)), tidSetX)
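+
+    # Illustrative sketch (toy values): _calculate() buckets a pattern by the
+    # sum of its timestamps, e.g. sum([1, 4, 9]) % 10000 == 14. Patterns with
+    # identical tidSets necessarily land in the same bucket, so _contains()
+    # only has to compare a new candidate against that one bucket when testing
+    # the closed property (a superset with equal support already recorded).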
+
+    @deprecated("It is recommended to use mine() instead of startMine() for mining process")
+    def startMine(self):
+        """
+        Mining process will start from here
+        """
+        self._startTime = _ab._time.time()
+        self._finalPatterns = {}
+        self._hashing = {}
+        periodicFrequentItems = self._scanDatabase()
+        for i in range(len(periodicFrequentItems)):
+            itemX = periodicFrequentItems[i]
+            if itemX is None:
+                continue
+            tidSetX = self._tidList[itemX]
+            itemSetX = [itemX]
+            itemSets = []
+            tidSets = []
+            for j in range(i + 1, len(periodicFrequentItems)):
+                itemJ = periodicFrequentItems[j]
+                if itemJ is None:
+                    continue
+                tidSetJ = self._tidList[itemJ]
+                y1 = list(set(tidSetX).intersection(tidSetJ))
+                if len(y1) < self._minSup:
+                    continue
+                if len(tidSetX) == len(tidSetJ) and len(y1) == len(tidSetX):
+                    periodicFrequentItems.insert(j, None)
+                    itemSetX.append(itemJ)
+                elif len(tidSetX) < len(tidSetJ) and len(y1) == len(tidSetX):
+                    itemSetX.append(itemJ)
+                elif len(tidSetX) > len(tidSetJ) and len(y1) == len(tidSetJ):
+                    periodicFrequentItems.insert(j, None)
+                    itemSets.append(itemJ)
+                    tidSets.append(y1)
+                else:
+                    itemSets.append(itemJ)
+                    tidSets.append(y1)
+            if len(itemSets) > 0:
+                self._processEquivalenceClass(itemSetX, itemSets, tidSets)
+            self._save([], itemSetX, tidSetX)
+        self._endTime = _ab._time.time()
+        process = _ab._psutil.Process(_ab._os.getpid())
+        self._memoryUSS = process.memory_full_info().uss
+        self._memoryRSS = process.memory_info().rss
+        print("Closed periodic frequent patterns were generated successfully using CPFPMiner algorithm")
+
+    def Mine(self):
+        """
+        Mining process will start from here
+        """
+        self._startTime = _ab._time.time()
+        self._finalPatterns = {}
+        self._hashing = {}
+        periodicFrequentItems = self._scanDatabase()
+        for i in range(len(periodicFrequentItems)):
+            itemX = periodicFrequentItems[i]
+            if itemX is None:
+                continue
+            tidSetX = self._tidList[itemX]
+            itemSetX = [itemX]
+            itemSets = []
+            tidSets = []
+            for j in range(i + 1, len(periodicFrequentItems)):
+                itemJ = periodicFrequentItems[j]
+                if itemJ is None:
+                    continue
+                tidSetJ = self._tidList[itemJ]
+                y1 = list(set(tidSetX).intersection(tidSetJ))
+                if len(y1) < self._minSup:
+                    continue
+                if len(tidSetX) == len(tidSetJ) and len(y1) == len(tidSetX):
+                    periodicFrequentItems.insert(j, None)
+                    itemSetX.append(itemJ)
+                elif len(tidSetX) < len(tidSetJ) and len(y1) == len(tidSetX):
+                    itemSetX.append(itemJ)
+                elif len(tidSetX) > len(tidSetJ) and len(y1) == len(tidSetJ):
+                    periodicFrequentItems.insert(j, None)
+                    itemSets.append(itemJ)
+                    tidSets.append(y1)
+                else:
+                    itemSets.append(itemJ)
+                    tidSets.append(y1)
+            if len(itemSets) > 0:
+                self._processEquivalenceClass(itemSetX, itemSets, tidSets)
+            self._save([], itemSetX, tidSetX)
+        self._endTime = _ab._time.time()
+        process = _ab._psutil.Process(_ab._os.getpid())
+        self._memoryUSS = process.memory_full_info().uss
+        self._memoryRSS = process.memory_info().rss
+        print("Closed periodic frequent patterns were generated successfully using CPFPMiner algorithm")
+
+    def getMemoryUSS(self):
+        """
+        Total amount of USS memory consumed by the mining process will be retrieved from this function
+
+        :return: returning USS memory consumed by the mining process
+        :rtype: float
+        """
+        return self._memoryUSS
+
+    def getMemoryRSS(self):
+        """
+        Total amount of RSS memory consumed by the mining process will be retrieved from this function
+
+        :return: returning RSS memory consumed by the mining process
+        :rtype: float
+        """
+        return self._memoryRSS
+
+    def getRuntime(self):
+        """
+        Calculating the total amount of runtime taken by the mining process
+
+        :return: returning total amount of runtime taken by the mining process
+        :rtype: float
+        """
+        return self._endTime - self._startTime
+
+    def getPatternsAsDataFrame(self):
+        """
+        Storing final frequent patterns in a dataframe
+
+        :return: returning frequent patterns in a dataframe
+        :rtype: pd.DataFrame
+        """
+        data = []
+        for a, b in self._finalPatterns.items():
+            data.append([a, b[0], b[1]])
+        dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support', 'Periodicity'])
+        return dataFrame
+
+    def save(self, outFile):
+        """
+        Complete set of frequent patterns will be loaded in to an output file
+
+        :param outFile: name of the output file
+        :type outFile: csv file
+        """
+        self._oFile = outFile
+        with open(self._oFile, 'w+') as writer:
+            for x, y in self._finalPatterns.items():
+                s1 = x.replace(' ', '\t').strip() + ":" + str(y[0]) + ":" + str(y[1])
+                writer.write("%s \n" % s1)
+
+    def getPatterns(self):
+        """
+        Function to send the set of frequent patterns after completion of the mining process
+
+        :return: returning frequent patterns
+        :rtype: dict
+        """
+        return self._finalPatterns
+
+    def printResults(self):
+        """
+        This function is used to print the results
+        """
+        print("Total number of Closed Periodic Frequent Patterns:", len(self.getPatterns()))
+        print("Total Memory in USS:", self.getMemoryUSS())
+        print("Total Memory in RSS:", self.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_ab._sys.argv) == 5 or len(_ab._sys.argv) == 6:
+        if len(_ab._sys.argv) == 6:
+            _ap = CPFPMiner(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5])
+        if len(_ab._sys.argv) == 5:
+            _ap = CPFPMiner(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4])
+        _ap.startMine()
+        print("Total number of Closed Periodic-Frequent Patterns:", len(_ap.getPatterns()))
+        _ap.save(_ab._sys.argv[2])
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS:", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        print("Error! The number of input parameters does not match the total number of parameters provided")
\ No newline at end of file
diff --git a/sphinx/_build/html/_modules/PAMI/periodicFrequentPattern/maximal/MaxPFGrowth.html b/sphinx/_build/html/_modules/PAMI/periodicFrequentPattern/maximal/MaxPFGrowth.html
new file mode 100644
index 000000000..95aafb660
--- /dev/null
+++ b/sphinx/_build/html/_modules/PAMI/periodicFrequentPattern/maximal/MaxPFGrowth.html
@@ -0,0 +1,997 @@
+PAMI.periodicFrequentPattern.maximal.MaxPFGrowth — PAMI 2024.04.23 documentation

Source code for PAMI.periodicFrequentPattern.maximal.MaxPFGrowth

+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#             from PAMI.periodicFrequentPattern.maximal import MaxPFGrowth as alg
+#
+#             obj = alg.MaxPFGrowth("../basic/sampleTDB.txt", "2", "6")
+#
+#             obj.startMine()
+#
+#             Patterns = obj.getPatterns()
+#
+#             print("Total number of Frequent Patterns:", len(Patterns))
+#
+#             obj.save("patterns")
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+__copyright__ = """
+ Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+
+from PAMI.periodicFrequentPattern.maximal import abstract as _ab
+from typing import List, Dict, Tuple, Set, Union, Any, Generator
+from deprecated import deprecated
+
+#global maximalTree
+_minSup = float()
+_maxPer = float()
+_lno = int()
+
+
+class _Node(object):
+    """
+     A class used to represent the node of frequentPatternTree
+
+    :Attributes:
+
+        item : int
+            storing item of a node
+        timeStamps : list
+            To maintain the timestamps of Database at the end of the branch
+        parent : node
+            To maintain the parent of every node
+        children : list
+            To maintain the children of node
+
+    :Methods:
+
+        addChild(itemName)
+            storing the children to their respective parent nodes
+    """
+    def __init__(self, item: int, children: list) -> None:
+        self.item = item
+        self.children = children
+        self.parent = None
+        self.timeStamps = []
+
+    def addChild(self, node) -> None:
+        """
+        To add the children details to the parent node children list
+
+        :param node: children node
+
+        :return: adding to parent node children
+        """
+        self.children[node.item] = node
+        node.parent = self
+
+
+class _Tree(object):
+    """
+    A class used to represent the frequentPatternGrowth tree structure
+
+    :Attributes:
+
+        root : Node
+            Represents the root node of the tree
+        summaries : dictionary
+            storing the nodes with same item name
+        info : dictionary
+            stores the support of items
+
+    :Methods:
+
+        addTransaction(Database)
+            creating Database as a branch in frequentPatternTree
+        getConditionPatterns(Node)
+            generates the conditional patterns from tree for specific node
+        conditionalTransaction(prefixPaths,Support)
+            takes the prefixPath of a node and support at child of the path and extract the frequent items from
+            prefixPaths and generates prefixPaths with items which are frequent
+        remove(Node)
+            removes the node from tree once after generating all the patterns respective to the node
+        generatePatterns(Node)
+            starts from the root node of the tree and mines the frequent patterns
+    """
+    def __init__(self) -> None:
+        self.root = _Node(None, {})
+        self.summaries = {}
+        self.info = {}
+        #self.maximalTree = _MPTree()
+
+    def addTransaction(self, transaction: List[Any], tid: List[int]) -> None:
+        """
+        adding a transaction into the tree
+
+        :param transaction: transactions in a database
+        :param tid: timestamp of the transaction in database
+        :return: None
+        """
+        currentNode = self.root
+        for i in range(len(transaction)):
+            if transaction[i] not in currentNode.children:
+                newNode = _Node(transaction[i], {})
+                currentNode.addChild(newNode)
+                if transaction[i] in self.summaries:
+                    self.summaries[transaction[i]].append(newNode)
+                else:
+                    self.summaries[transaction[i]] = [newNode]
+                currentNode = newNode
+            else:
+                currentNode = currentNode.children[transaction[i]]
+        currentNode.timeStamps = currentNode.timeStamps + tid
+
+    def getConditionalPatterns(self, alpha: Any) -> Tuple[List[List[Any]], List[List[int]], Dict[Any, List[int]]]:
+        """
+        to get the conditional patterns of a node
+
+        :param alpha: node in the tree
+        :return: conditional patterns of a node
+        """
+        finalPatterns = []
+        finalSets = []
+        for i in self.summaries[alpha]:
+            set1 = i.timeStamps
+            set2 = []
+            while i.parent.item is not None:
+                set2.append(i.parent.item)
+                i = i.parent
+            if len(set2) > 0:
+                set2.reverse()
+                finalPatterns.append(set2)
+                finalSets.append(set1)
+        finalPatterns, finalSets, info = _conditionalTransactions(finalPatterns, finalSets)
+        return finalPatterns, finalSets, info
+
+    def removeNode(self, nodeValue: Any) -> None:
+        """
+        removes the leaf node by pushing its timestamps to parent node
+
+        :param nodeValue: node of a tree
+        :return: None
+        """
+        for i in self.summaries[nodeValue]:
+            i.parent.timeStamps = i.parent.timeStamps + i.timeStamps
+            del i.parent.children[nodeValue]
+            i = None
+
+    def getTimeStamps(self, alpha: Any) -> List[int]:
+        """
+        to get all the timestamps related to a node in tree
+
+        :param alpha: node of a tree
+        :return: timestamps of a node
+        """
+        temp = []
+        for i in self.summaries[alpha]:
+            temp += i.timeStamps
+        return temp
+
+    def generatePatterns(self, prefix: List[Any], patterns: Dict[Tuple[Any], Tuple[int, int]], maximalTree: Any) -> None:
+        """
+        To generate the maximal periodic frequent patterns
+
+        :param prefix: an empty list of itemSet to form the combinations
+        :param patterns: dictionary that accumulates the discovered maximal patterns with their support and periodicity
+        :param maximalTree: the maximal pattern tree used to test whether a candidate is subsumed by an already discovered pattern
+        :return: maximal periodic frequent patterns
+        """
+        #global maximalTree
+        for i in sorted(self.summaries, key=lambda x: (self.info.get(x), -x)):
+            pattern = prefix[:]
+            pattern.append(i)
+            condPattern, timeStamps, info = self.getConditionalPatterns(i)
+            conditionalTree = _Tree()
+            conditionalTree.info = info.copy()
+            head = pattern[:]
+            tail = []
+            for k in info:
+                tail.append(k)
+            sub = head + tail
+            if maximalTree.checkerSub(sub) == 1:
+                for pat in range(len(condPattern)):
+                    conditionalTree.addTransaction(condPattern[pat], timeStamps[pat])
+                if len(condPattern) >= 1:
+                    conditionalTree.generatePatterns(pattern, patterns, maximalTree)
+                else:
+                    maximalTree.addTransaction(pattern)
+                    patterns[tuple(pattern)] = self.info[i]
+            self.removeNode(i)
+
+
+class _MNode(object):
+    """
+    A class used to represent the node of frequentPatternTree
+
+    :Attributes:
+
+        item : int
+            storing item of a node
+        children : list
+            To maintain the children of node
+
+    :Methods:
+
+        addChild(itemName)
+            storing the children to their respective parent nodes
+    """
+    def __init__(self, item: Any, children: Dict[Any, Any]) -> None:
+        self.item = item
+        self.children = children
+
+    def addChild(self, node: Any) -> None:
+        """
+        To add the children details to parent node children variable
+
+        :param node: children node
+        :return: adding children node to parent node
+        """
+        self.children[node.item] = node
+        node.parent = self
+
+
+class _MPTree(object):
+    """
+    A class used to represent the node of frequentPatternTree
+
+    :Attributes:
+
+        root : node
+            the root of a tree
+        summaries : dict
+            to store the items with same name into dictionary
+
+    :Methods:
+
+        addTransaction(itemSet)
+            the generated periodic-frequent pattern is added into maximal-tree
+        checkerSub(itemSet)
+            to check of subset of itemSet is present in tree
+    """
+    def __init__(self) -> None:
+        self.root = _MNode(None, {})
+        self.summaries = {}
+
+    def addTransaction(self, transaction: List[Any]) -> None:
+        """
+        to add the transaction in maximal tree
+
+        :param transaction: resultant periodic frequent pattern
+        :return: maximal tree
+        """
+        currentNode = self.root
+        transaction.sort()
+        for i in range(len(transaction)):
+            if transaction[i] not in currentNode.children:
+                newNode = _MNode(transaction[i], {})
+                currentNode.addChild(newNode)
+                if transaction[i] in self.summaries:
+                    self.summaries[transaction[i]].insert(0, newNode)
+                else:
+                    self.summaries[transaction[i]] = [newNode]
+                currentNode = newNode
+            else:
+                currentNode = currentNode.children[transaction[i]]
+
+    def checkerSub(self, items: List[Any]) -> int:
+        """
+        To check whether a superset of items is already present in the maximal tree
+
+        :param items: the pattern to check
+        :return: 1 if no superset of items is present in the tree, 0 otherwise
+        """
+        items.sort(reverse=True)
+        item = items[0]
+        if item not in self.summaries:
+            return 1
+        else:
+            if len(items) == 1:
+                return 0
+        for t in self.summaries[item]:
+            cur = t.parent
+            i = 1
+            while cur.item is not None:
+                if items[i] == cur.item:
+                    i += 1
+                    if i == len(items):
+                        return 0
+                cur = cur.parent
+        return 1
+
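+
+# An illustrative sketch of the maximality check (assumed toy item ids):
+#
+# >>> tree = _MPTree()
+# >>> tree.addTransaction([2, 5, 7])
+# >>> tree.checkerSub([5, 7])   # {5, 7} is subsumed by the stored {2, 5, 7}
+# 0
+# >>> tree.checkerSub([5, 9])   # no superset of {5, 9} is stored
+# 1
+#
+# A candidate is only extended, or recorded as maximal, when checkerSub
+# returns 1.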
+
+#maximalTree = _MPTree()
+
+
+def _getPeriodAndSupport(timeStamps: List[int]) -> List[Union[int, float]]:
+    """
+    To calculate the periodicity and support of a pattern with their respective timeStamps
+
+    :param timeStamps: timeStamps
+    :return: Support and periodicity
+    """
+    timeStamps.sort()
+    cur = 0
+    per = 0
+    sup = 0
+    for j in range(len(timeStamps)):
+        per = max(per, timeStamps[j] - cur)
+        if per > _maxPer:
+            return [0, 0]
+        cur = timeStamps[j]
+        sup += 1
+    per = max(per, abs(_lno - cur))
+    return [sup, per]
+
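+
+# A worked example (illustrative, assuming _maxPer = 3 and _lno = 10): the
+# timestamps [2, 4, 5, 8] give support 4 and periodicity
+# max(2 - 0, 4 - 2, 5 - 4, 8 - 5, |10 - 8|) = 3, i.e. [4, 3]; if any gap
+# exceeded _maxPer, the function would short-circuit and return [0, 0].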
+
+def _conditionalTransactions(condPatterns: List[List[int]], condTimeStamps: List[List[int]]) -> Tuple[List[List[int]], List[List[int]], Dict[int, Tuple[int, float]]]:
+    """
+    To calculate the timestamps of conditional items in conditional patterns
+
+    :param condPatterns: conditional patterns of node
+    :param condTimeStamps: timeStamps of a conditional patterns
+    :return: removing items with low minSup or periodicity and sort the conditional transactions
+    """
+    pat = []
+    timeStamps = []
+    data1 = {}
+    for i in range(len(condPatterns)):
+        for j in condPatterns[i]:
+            if j in data1:
+                data1[j] = data1[j] + condTimeStamps[i]
+            else:
+                data1[j] = condTimeStamps[i]
+    updatedDict = {}
+    for m in data1:
+        updatedDict[m] = _getPeriodAndSupport(data1[m])
+    updatedDict = {k: v for k, v in updatedDict.items() if v[0] >= _minSup and v[1] <= _maxPer}
+    count = 0
+    for p in condPatterns:
+        p1 = [v for v in p if v in updatedDict]
+        trans = sorted(p1, key=lambda x: (updatedDict.get(x)[0], -x), reverse=True)
+        if len(trans) > 0:
+            pat.append(trans)
+            timeStamps.append(condTimeStamps[count])
+        count += 1
+    return pat, timeStamps, updatedDict
+
+
+
+[docs] +class MaxPFGrowth(_ab._periodicFrequentPatterns): + """ + :Description: MaxPF-Growth is one of the fundamental algorithm to discover maximal periodic-frequent + patterns in a temporal database. + + :Reference: R. Uday Kiran, Yutaka Watanobe, Bhaskar Chaudhury, Koji Zettsu, Masashi Toyoda, Masaru Kitsuregawa, + "Discovering Maximal Periodic-Frequent Patterns in Very Large Temporal Databases", + IEEE 2020, https://ieeexplore.ieee.org/document/9260063 + + :param iFile: str : + Name of the Input file to mine complete set of periodic frequent pattern's + :param oFile: str : + Name of the output file to store complete set of periodic frequent pattern's + :param minSup: str: + Controls the minimum number of transactions in which every item must appear in a database. + :param maxPer: float: + Controls the maximum number of transactions in which any two items within a pattern can reappear. + + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + :Attributes: + + iFile : file + Name of the Input file or path of the input file + oFile : file + Name of the output file or path of the output file + minSup: int or float or str + The user can specify minSup either in count or proportion of database size. + If the program detects the data type of minSup is integer, then it treats minSup is expressed in count. + Otherwise, it will be treated as float. + Example: minSup=10 will be treated as integer, while minSup=10.0 will be treated as float + maxPer: int or float or str + The user can specify maxPer either in count or proportion of database size. + If the program detects the data type of maxPer is integer, then it treats maxPer is expressed in count. + Otherwise, it will be treated as float. + Example: maxPer=10 will be treated as integer, while maxPer=10.0 will be treated as float + sep : str + This variable is used to distinguish items from one another in a transaction. The default separator is tab space or \t. + However, the users can override their default separator. 
+        memoryUSS : float
+            To store the total amount of USS memory consumed by the program
+        memoryRSS : float
+            To store the total amount of RSS memory consumed by the program
+        startTime : float
+            To record the start time of the mining process
+        endTime : float
+            To record the completion time of the mining process
+        Database : list
+            To store the transactions of a database in list
+        mapSupport : Dictionary
+            To maintain the information of items and their frequency
+        lno : int
+            it represents the total number of transactions
+        tree : class
+            it represents the Tree class
+        itemSetCount : int
+            it represents the total number of patterns
+        finalPatterns : dict
+            it represents to store the patterns
+
+    :Methods:
+
+        startMine()
+            Mining process will start from here
+        getPatterns()
+            Complete set of patterns will be retrieved with this function
+        save(oFile)
+            Complete set of periodic-frequent patterns will be written to an output file
+        getPatternsAsDataFrame()
+            Complete set of periodic-frequent patterns will be loaded in to a dataframe
+        getMemoryUSS()
+            Total amount of USS memory consumed by the mining process will be retrieved from this function
+        getMemoryRSS()
+            Total amount of RSS memory consumed by the mining process will be retrieved from this function
+        getRuntime()
+            Total amount of runtime taken by the mining process will be retrieved from this function
+        creatingItemSets(fileName)
+            Scans the dataset or dataframes and stores in list format
+        PeriodicFrequentOneItem()
+            Extracts the one-length periodic-frequent patterns from the database
+        updateDatabases()
+            Updates the database by removing aperiodic items and sorting each transaction by decreasing item support
+        buildTree()
+            The updated transactions are added into the tree by setting the root node as null
+
+    **Executing the code on terminal:**
+    -------------------------------------
+    .. code-block:: console
+
+      Format:
+
+      (.venv) $ python3 MaxPFGrowth.py <inputFile> <outputFile> <minSup> <maxPer>
+
+      Example usage:
+
+      (.venv) $ python3 MaxPFGrowth.py sampleTDB.txt patterns.txt 0.3 0.4
+
+    .. note:: minSup will be considered in percentage of database transactions
+
+    **Sample run of the imported code:**
+    ------------------------------------------
+    ..
code-block:: python + + from PAMI.periodicFrequentPattern.maximal import MaxPFGrowth as alg + + obj = alg.MaxPFGrowth("../basic/sampleTDB.txt", "2", "6") + + obj.startMine() + + Patterns = obj.getPatterns() + + print("Total number of Frequent Patterns:", len(Patterns)) + + obj.save("patterns") + + Df = obj.getPatternsAsDataFrame() + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + **Credits:** + --------------- + The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.\n + + """ + _startTime = float() + _endTime = float() + _minSup = str() + _maxPer = float() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _sep = " " + _memoryUSS = float() + _memoryRSS = float() + _Database = [] + _rank = {} + _rankedUp = {} + _lno = 0 + _patterns = {} + _maximalTree = str() + + def __init__(self, iFile: Any, minSup: Union[int, float, str], maxPer: Union[int, float, str], sep: str='\t') -> None: + super().__init__(iFile, minSup, maxPer, sep) + + def _creatingItemSets(self) -> None: + """ + Storing the complete Databases of the database/input file in a database variable + :rtype: storing transactions into Database variable + :return: None + """ + self._Database = [] + if isinstance(self._iFile, _ab._pd.DataFrame): + data, ts = [], [] + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'TS' in i: + ts = self._iFile['TS'].tolist() + if 'Transactions' in i: + data = self._iFile['Transactions'].tolist() + for i in range(len(data)): + tr = [ts[i][0]] + data[i] + self._Database.append(tr) + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line.strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + except IOError: + print("File Not Found") + quit() + + def _periodicFrequentOneItem(self) -> Dict[Any, List[Union[int, float]]]: + """ + calculates the support of each item in the dataset and assign the ranks to the items + by decreasing support and returns the frequent items list + + :rtype: return the one-length periodic frequent patterns + + """ + data = {} + for tr in self._Database: + for i in range(1, len(tr)): + if tr[i] not in data: + data[tr[i]] = [int(tr[0]), int(tr[0]), 1] + else: + data[tr[i]][0] = max(data[tr[i]][0], (int(tr[0]) - data[tr[i]][1])) + data[tr[i]][1] = int(tr[0]) + data[tr[i]][2] += 1 + for key in data: + data[key][0] = max(data[key][0], abs(len(self._Database) - data[key][1])) + data = {k: [v[2], v[0]] for k, v in data.items() if v[0] <= self._maxPer and v[2] >= self._minSup} + pfList = [k for k, v in sorted(data.items(), key=lambda x: (x[1][0], x[0]), reverse=True)] + self._rank = dict([(index, item) for (item, index) in enumerate(pfList)]) + return data + + def _updateDatabases(self, dict1: Dict[Any, List[Union[int, float]]]) -> List[List[Union[int, float]]]: + """ + Remove the items which are not frequent from Databases and updates the Databases with rank of items + + :param dict1: frequent items with support + :type dict1: dictionary + 
:rtype: sorted and updated transactions + """ + list1 = [] + for tr in self._Database: + list2 = [int(tr[0])] + for i in range(1, len(tr)): + if tr[i] in dict1: + list2.append(self._rank[tr[i]]) + if len(list2) >= 2: + basket = list2[1:] + basket.sort() + list2[1:] = basket[0:] + list1.append(list2) + return list1 + + @staticmethod + def _buildTree(data: List[List[Union[int, float]]], info: Dict[Any, List[Union[int, float]]]) -> Any: + """ + it takes the Databases and support of each item and construct the main tree with setting root node as null + + :param data: it represents the one Databases in database + :type data: list + :param info: it represents the support of each item + :type info: dictionary + :rtype: returns root node of tree + """ + + rootNode = _Tree() + rootNode.info = info.copy() + for i in range(len(data)): + set1 = [data[i][0]] + rootNode.addTransaction(data[i][1:], set1) + return rootNode + + def _savePeriodic(self, itemSet: List[Any]) -> List[Any]: + """ + To convert the ranks of items in to their original item names + + :param itemSet: frequent pattern. + :return: frequent pattern with original item names + """ + t1 = [] + for i in itemSet: + t1.append(self._rankedUp[i]) + return t1 + + def _convert(self, value: Union[int, float, str]) -> Union[int, float]: + """ + To convert the given user specified value + + :param value: user specified value + :return: converted value + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self._Database) * value) + if type(value) is str: + if '.' in value: + value = float(value) + value = (len(self._Database) * value) + else: + value = int(value) + return value + +
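+    # A hedged illustration (not in the original source): with a database of
+    # 100 transactions, _convert(10) returns 10 (an absolute count), while
+    # _convert(0.1) and _convert("0.1") return 10.0 (a proportion of the
+    # database size) and _convert("10") returns 10.
+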
+[docs] + @deprecated("It is recommended to use mine() instead of startMine() for mining process") + def startMine(self) -> None: + """ + Mining process will start from this function + :return: None + """ + + global _minSup, _maxPer, _lno + self._patterns = {} + self._startTime = _ab._time.time() + if self._iFile is None: + raise Exception("Please enter the file path or file name:") + if self._minSup is None: + raise Exception("Please enter the Minimum Support") + self._creatingItemSets() + self._minSup = self._convert(self._minSup) + self._maxPer = self._convert(self._maxPer) + _minSup, _maxPer, _lno = self._minSup, self._maxPer, len(self._Database) + if self._minSup > len(self._Database): + raise Exception("Please enter the minSup in range between 0 to 1") + _generatedItems = self._periodicFrequentOneItem() + _updatedDatabases = self._updateDatabases(_generatedItems) + for x, y in self._rank.items(): + self._rankedUp[y] = x + _info = {self._rank[k]: v for k, v in _generatedItems.items()} + _Tree = self._buildTree(_updatedDatabases, _info) + self._finalPatterns = {} + self._maximalTree = _MPTree() + _Tree.generatePatterns([], self._patterns, self._maximalTree) + for x, y in self._patterns.items(): + pattern = str() + x = self._savePeriodic(x) + for i in x: + pattern = pattern + i + " " + self._finalPatterns[pattern] = y + self._endTime = _ab._time.time() + _process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = float() + self._memoryRSS = float() + self._memoryUSS = _process.memory_full_info().uss + self._memoryRSS = _process.memory_info().rss + print("Maximal Periodic Frequent patterns were generated successfully using MAX-PFPGrowth algorithm ")
+ + +
+[docs] + def Mine(self) -> None: + """ + Mining process will start from this function + :return: None + """ + + global _minSup, _maxPer, _lno + self._patterns = {} + self._startTime = _ab._time.time() + if self._iFile is None: + raise Exception("Please enter the file path or file name:") + if self._minSup is None: + raise Exception("Please enter the Minimum Support") + self._creatingItemSets() + self._minSup = self._convert(self._minSup) + self._maxPer = self._convert(self._maxPer) + _minSup, _maxPer, _lno = self._minSup, self._maxPer, len(self._Database) + if self._minSup > len(self._Database): + raise Exception("Please enter the minSup in range between 0 to 1") + _generatedItems = self._periodicFrequentOneItem() + _updatedDatabases = self._updateDatabases(_generatedItems) + for x, y in self._rank.items(): + self._rankedUp[y] = x + _info = {self._rank[k]: v for k, v in _generatedItems.items()} + _Tree = self._buildTree(_updatedDatabases, _info) + self._finalPatterns = {} + self._maximalTree = _MPTree() + _Tree.generatePatterns([], self._patterns, self._maximalTree) + for x, y in self._patterns.items(): + pattern = str() + x = self._savePeriodic(x) + for i in x: + pattern = pattern + i + " " + self._finalPatterns[pattern] = y + self._endTime = _ab._time.time() + _process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = float() + self._memoryRSS = float() + self._memoryUSS = _process.memory_full_info().uss + self._memoryRSS = _process.memory_info().rss + print("Maximal Periodic Frequent patterns were generated successfully using MAX-PFPGrowth algorithm ")
+ + + +
+[docs]
+    def getMemoryUSS(self) -> float:
+        """Total amount of USS memory consumed by the mining process will be retrieved from this function
+
+        :return: returning USS memory consumed by the mining process
+        :rtype: float
+        """
+
+        return self._memoryUSS
+ + +
+[docs]
+    def getMemoryRSS(self) -> float:
+        """Total amount of RSS memory consumed by the mining process will be retrieved from this function
+
+        :return: returning RSS memory consumed by the mining process
+        :rtype: float
+        """
+
+        return self._memoryRSS
+ + +
+[docs]
+    def getRuntime(self) -> float:
+        """Calculating the total amount of runtime taken by the mining process
+
+        :return: returning total amount of runtime taken by the mining process
+        :rtype: float
+        """
+
+        return self._endTime - self._startTime
+ + +
+[docs]
+    def getPatternsAsDataFrame(self) -> _ab._pd.DataFrame:
+        """
+        Storing final periodic-frequent patterns in a dataframe
+
+        :return: returning periodic-frequent patterns in a dataframe
+        :rtype: pd.DataFrame
+        """
+
+        data = []
+        for a, b in self._finalPatterns.items():
+            data.append([a, b[0], b[1]])
+        dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support', 'Periodicity'])
+        return dataFrame
+ + +
+[docs]
+    def save(self, outFile: str) -> None:
+        """
+        Complete set of periodic-frequent patterns will be written to an output file
+
+        :param outFile: name of the output file
+        :type outFile: csv file
+        :return: None
+        """
+        self._oFile = outFile
+        writer = open(self._oFile, 'w+')
+        for x, y in self._finalPatterns.items():
+            s1 = x.replace(' ', '\t').strip() + ":" + str(y[0]) + ":" + str(y[1])
+            writer.write("%s \n" % s1)
+        writer.close()  # close the handle so every pattern is flushed to disk
+ + +
+[docs]
+    def getPatterns(self) -> Dict[str, Tuple[int, int]]:
+        """
+        Function to send the set of periodic-frequent patterns after completion of the mining process
+
+        :return: returning periodic-frequent patterns
+        :rtype: dict
+        """
+        return self._finalPatterns
+ + +
+[docs]
+    def printResults(self) -> None:
+        print("Total number of Maximal Periodic Frequent Patterns:", len(self.getPatterns()))
+        print("Total Memory in USS:", self.getMemoryUSS())
+        print("Total Memory in RSS", self.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_ab._sys.argv) == 5 or len(_ab._sys.argv) == 6:
+        if len(_ab._sys.argv) == 6:
+            _ap = MaxPFGrowth(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5])
+        if len(_ab._sys.argv) == 5:
+            _ap = MaxPFGrowth(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4])
+        _ap.startMine()
+        print("Total number of Maximal Periodic-Frequent Patterns:", len(_ap.getPatterns()))
+        _ap.save(_ab._sys.argv[2])
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        for i in [100, 200, 300, 400, 500]:
+            _ap = MaxPFGrowth('/Users/Likhitha/Downloads/temporal_T10I4D100K.csv', i, 5000, '\t')
+            _ap.startMine()
+            print("Total number of Maximal Periodic Frequent Patterns:", len(_ap.getPatterns()))
+            _ap.save('/Users/Likhitha/Downloads/output.txt')
+            print("Total Memory in USS:", _ap.getMemoryUSS())
+            print("Total Memory in RSS", _ap.getMemoryRSS())
+            print("Total ExecutionTime in seconds:", _ap.getRuntime())
+        print("Error! The number of input parameters do not match the total number of parameters provided")
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/periodicFrequentPattern/topk/TopkPFP/TopkPFP.html b/sphinx/_build/html/_modules/PAMI/periodicFrequentPattern/topk/TopkPFP/TopkPFP.html new file mode 100644 index 000000000..d0eea66f0 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/periodicFrequentPattern/topk/TopkPFP/TopkPFP.html @@ -0,0 +1,663 @@ + + + + + + PAMI.periodicFrequentPattern.topk.TopkPFP.TopkPFP — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +

Source code for PAMI.periodicFrequentPattern.topk.TopkPFP.TopkPFP

+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#
+#             from PAMI.periodicFrequentPattern.topk.TopkPFP import TopkPFP as alg
+#
+#             obj = alg.TopkPFPGrowth(iFile, k, maxPer,oFile)
+#
+#             obj.startMine()
+#
+#             periodicFrequentPatterns = obj.getPatterns()
+#
+#             print("Total number of Frequent Patterns:", len(periodicFrequentPatterns))
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+__copyright__ = """
+ Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+from PAMI.periodicFrequentPattern.basic import abstract as _ab
+import pandas as pd
+from deprecated import deprecated
+
+
+
+[docs] +class TopkPFPGrowth(_ab._periodicFrequentPatterns): + """ + :Description: Top - K is and algorithm to discover top periodic frequent patterns in a temporal database. + + :Reference: Komate Amphawan, Philippe Lenca, Athasit Surarerks: "Mining Top-K Periodic-Frequent Pattern from Transactional Databases without Support Threshold" + International Conference on Advances in Information Technology: https://link.springer.com/chapter/10.1007/978-3-642-10392-6_3 + + :param iFile: str : + Name of the Input file to mine complete set of periodic frequent pattern's + :param oFile: str : + Name of the output file to store complete set of periodic frequent pattern's + :param maxPer: str: + Controls the maximum number of transactions in which any two items within a pattern can reappear. + + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + :Attributes: + + iFile : str + Input file name or path of the input file + k: int + User specified counte of top frequent patterns + sep : str + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space or \t. + However, the users can override their default separator. + oFile : str + Name of the output file or the path of the output file + startTime:float + To record the start time of the mining process + endTime:float + To record the completion time of the mining process + finalPatterns: dict + Storing the complete set of patterns in a dictionary variable + memoryUSS : float + To store the total amount of USS memory consumed by the program + memoryRSS : float + To store the total amount of RSS memory consumed by the program + + :Methods: + + startMine() + Mining process will start from here + getPatterns() + Complete set of patterns will be retrieved with this function + save(oFile) + Complete set of frequent patterns will be loaded in to a output file + getPatternsAsDataFrame() + Complete set of frequent patterns will be loaded in to a dataframe + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function + creatingItemSets() + Scans the dataset or dataframes and stores in list format + frequentOneItem() + Generates one frequent patterns + eclatGeneration(candidateList) + It will generate the combinations of frequent items + generateFrequentPatterns(tidList) + It will generate the combinations of frequent items from a list of items + + **Executing the code on terminal:** + ------------------------------------- + .. code-block:: console + + + Format: + + (.venv) $ python3 TopkPFP.py <inputFile> <outputFile> <k> <maxPer> + + Examples: + + (.venv) $ python3 TopkPFP.py sampleDB.txt patterns.txt 10 3 + + + **Sample run of the importing code:** + --------------------------------------- + .. 
code-block:: python + + import PAMI.periodicFrequentPattern.topk.TopkPFPGrowth as alg + + obj = alg.TopkPFPGrowth(iFile, k, maxPer) + + obj.startMine() + + periodicFrequentPatterns = obj.getPatterns() + + print("Total number of Frequent Patterns:", len(periodicFrequentPatterns)) + + obj.save(oFile) + + Df = obj.getPatternInDataFrame() + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + **Credits:** + -------------- + The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran. + + """ + + _startTime = float() + _endTime = float() + _k = int() + _maxPer = " " + _finalPatterns = {} + _iFile = " " + _oFile = " " + _sep = " " + _memoryUSS = float() + _memoryRSS = float() + _Database = [] + _tidList = {} + _lno = int() + _minimum = int() + _mapSupport = {} + + def _creatingItemSets(self): + """ + Storing the complete transactions of the database/input file in a database variable + + """ + self._Database = [] + if isinstance(self._iFile, _ab._pd.DataFrame): + data, ts = [], [] + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'TS' in i: + ts = self._iFile['TS'].tolist() + if 'Transactions' in i: + data = self._iFile['Transactions'].tolist() + if 'Patterns' in i: + data = self._iFile['Patterns'].tolist() + for i in range(len(data)): + tr = [ts[i][0]] + tr = tr + data[i] + self._Database.append(tr) + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line.strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + except IOError: + print("File Not Found") + quit() + + def _convert(self, value): + """ + To convert the given user specified value + + :param value: user specified value + :return: converted value + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self._Database) * value) + if type(value) is str: + if '.' 
in value: + value = float(value) + value = (len(self._Database) * value) + else: + value = int(value) + return value + + def _frequentOneItem(self): + """ + Generating one frequent patterns + """ + + self._mapSupport = {} + self._tidList = {} + n = 0 + for line in self._Database: + self._lno += 1 + n = int(line[0]) + for i in range(1, len(line)): + si = line[i] + if self._mapSupport.get(si) is None: + self._mapSupport[si] = [1, abs(0 - n), n] + self._tidList[si] = [n] + else: + self._mapSupport[si][0] += 1 + self._mapSupport[si][1] = max(self._mapSupport[si][1], abs(n - self._mapSupport[si][2])) + self._mapSupport[si][2] = n + self._tidList[si].append(n) + for x, y in self._mapSupport.items(): + self._mapSupport[x][1] = max(self._mapSupport[x][1], abs(n - self._mapSupport[x][2])) + self._maxPer = self._convert(self._maxPer) + self._k = self._convert(self._k) + self._mapSupport = {k: [v[0], v[1]] for k, v in self._mapSupport.items() if v[1] <= self._maxPer} + plist = [key for key, value in sorted(self._mapSupport.items(), key=lambda x: (x[1][0], x[0]), reverse=True)] + self._finalPatterns = {} + #print(len(plist)) + for i in plist: + if len(self._finalPatterns) >= self._k: + break + else: + self._finalPatterns[i] = [self._mapSupport[i][0], self._mapSupport[i][1]] + self._minimum = min([self._finalPatterns[i][0] for i in self._finalPatterns.keys()]) + plist = list(self._finalPatterns.keys()) + return plist + + def _getSupportAndPeriod(self, timeStamps): + """To calculate the periodicity and support + + :param timeStamps: Timestamps of an item set + :return: support, periodicity + """ + + global lno + timeStamps.sort() + cur = 0 + per = list() + sup = 0 + for j in range(len(timeStamps)): + per.append(timeStamps[j] - cur) + cur = timeStamps[j] + sup += 1 + per.append(self._lno - cur) + if len(per) == 0: + return [0, 0] + return [sup, max(per)] + + def _save(self, prefix, suffix, tidSetI): + """Saves the patterns that satisfy the periodic frequent property. + + :param prefix: the prefix of a pattern + :type prefix: list + :param suffix: the suffix of a patterns + :type suffix: list + :param tidSetI: the timestamp of a patterns + :type tidSetI: list + """ + + if prefix is None: + prefix = suffix + else: + prefix = prefix + suffix + val = self._getSupportAndPeriod(tidSetI) + sample = str() + for i in prefix: + sample = sample + i + " " + if len(self._finalPatterns) < self._k: + if val[0] >= self._minimum: + self._finalPatterns[sample] = val + self._finalPatterns = {k: v for k, v in + sorted(self._finalPatterns.items(), key=lambda item: item[1], reverse=True)} + self._minimum = min([self._finalPatterns[i][0] for i in self._finalPatterns.keys()]) + else: + for x, y in sorted(self._finalPatterns.items(), key=lambda x: x[1][0]): + if val[0] > y[0]: + del self._finalPatterns[x] + self._finalPatterns[x] = y + self._finalPatterns = {k: v for k, v in + sorted(self._finalPatterns.items(), key=lambda item: item[1], reverse=True)} + self._minimum = min([self._finalPatterns[i][0] for i in self._finalPatterns.keys()]) + return + + def _Generation(self, prefix, itemSets, tidSets): + """ + Equivalence class is followed and checks for the patterns generated for periodic-frequent patterns. 
+ + :param prefix: main equivalence prefix + :type prefix: periodic-frequent item or pattern + :param itemSets: patterns which are items combined with prefix and satisfying the periodicity + and frequent with their timestamps + :type itemSets: list + :param tidSets: timestamps of the items in the argument itemSets + :type tidSets: list + """ + if len(itemSets) == 1: + i = itemSets[0] + tidI = tidSets[0] + self._save(prefix, [i], tidI) + return + for i in range(len(itemSets)): + itemI = itemSets[i] + if itemI is None: + continue + tidSetI = tidSets[i] + classItemSets = [] + classTidSets = [] + itemSetX = [itemI] + for j in range(i + 1, len(itemSets)): + itemJ = itemSets[j] + tidSetJ = tidSets[j] + y = list(set(tidSetI).intersection(tidSetJ)) + val = self._getSupportAndPeriod(y) + if val[0] >= self._minimum and val[1] <= self._maxPer: + classItemSets.append(itemJ) + classTidSets.append(y) + newPrefix = list(set(itemSetX)) + prefix + self._Generation(newPrefix, classItemSets, classTidSets) + self._save(prefix, list(set(itemSetX)), tidSetI) + +
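+    # Hedged sketch (illustrative only): the equivalence-class expansion above
+    # grows candidates by intersecting tid-lists, e.g. for tid-lists
+    # [1, 2, 4, 6] and [2, 4, 6]:
+    #     y = list(set([1, 2, 4, 6]).intersection([2, 4, 6]))   # [2, 4, 6]
+    # and keeps the joined candidate only when _getSupportAndPeriod(y) meets
+    # the current minimum support and the maxPer constraint.
+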
+[docs] + @deprecated("It is recommended to use mine() instead of startMine() for mining process") + def startMine(self): + """ + Main function of the program + """ + self._startTime = _ab._time.time() + if self._iFile is None: + raise Exception("Please enter the file path or file name:") + if self._k is None: + raise Exception("Please enter the Minimum Support") + self._creatingItemSets() + _plist = self._frequentOneItem() + for i in range(len(_plist)): + itemI = _plist[i] + tidSetI = self._tidList[itemI] + itemSetX = [itemI] + itemSets = [] + tidSets = [] + for j in range(i + 1, len(_plist)): + itemJ = _plist[j] + tidSetJ = self._tidList[itemJ] + y1 = list(set(tidSetI).intersection(tidSetJ)) + val = self._getSupportAndPeriod(y1) + if val[0] >= self._minimum and val[1] <= self._maxPer: + itemSets.append(itemJ) + tidSets.append(y1) + self._Generation(itemSetX, itemSets, tidSets) + print("TopK Periodic Frequent patterns were generated successfully") + self._endTime = _ab._time.time() + _process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryRSS = float() + self._memoryUSS = float() + self._memoryUSS = _process.memory_full_info().uss + self._memoryRSS = _process.memory_info().rss
+ + +
+[docs] + def Mine(self): + """ + Main function of the program + """ + self._startTime = _ab._time.time() + if self._iFile is None: + raise Exception("Please enter the file path or file name:") + if self._k is None: + raise Exception("Please enter the Minimum Support") + self._creatingItemSets() + _plist = self._frequentOneItem() + for i in range(len(_plist)): + itemI = _plist[i] + tidSetI = self._tidList[itemI] + itemSetX = [itemI] + itemSets = [] + tidSets = [] + for j in range(i + 1, len(_plist)): + itemJ = _plist[j] + tidSetJ = self._tidList[itemJ] + y1 = list(set(tidSetI).intersection(tidSetJ)) + val = self._getSupportAndPeriod(y1) + if val[0] >= self._minimum and val[1] <= self._maxPer: + itemSets.append(itemJ) + tidSets.append(y1) + self._Generation(itemSetX, itemSets, tidSets) + print("TopK Periodic Frequent patterns were generated successfully") + self._endTime = _ab._time.time() + _process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryRSS = float() + self._memoryUSS = float() + self._memoryUSS = _process.memory_full_info().uss + self._memoryRSS = _process.memory_info().rss
+ + +
+[docs] + def getMemoryUSS(self): + """Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self): + """Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self): + """Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self): + """ + Storing final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataFrame = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a, b[0], b[1]]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support', 'Periodicity']) + return dataFrame
+ + +
+[docs] + def save(self, outFile): + """Complete set of frequent patterns will be loaded in to a output file + + :param outFile: name of the output file + :type outFile: file + """ + self._oFile = outFile + writer = open(self._oFile, 'w+') + for x, y in self._finalPatterns.items(): + patternsAndSupport = x.replace(' ', '\t') + ":" + f'{y[0]}:{y[1]}' + writer.write("%s \n" % patternsAndSupport)
+ + +
+[docs] + def getPatterns(self): + """ Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs]
+    def printResults(self):
+        print("Top K Periodic Frequent Patterns:", len(self.getPatterns()))
+        print("Total Memory in USS:", self.getMemoryUSS())
+        print("Total Memory in RSS", self.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_ab._sys.argv) == 5 or len(_ab._sys.argv) == 6:
+        if len(_ab._sys.argv) == 6:
+            _ap = TopkPFPGrowth(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5])
+        if len(_ab._sys.argv) == 5:
+            _ap = TopkPFPGrowth(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4])
+        _ap.startMine()
+        print("Top K Periodic-Frequent Patterns:", len(_ap.getPatterns()))
+        _ap.save(_ab._sys.argv[2])
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        print("Error! The number of input parameters do not match the total number of parameters provided")
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/periodicFrequentPattern/topk/kPFPMiner/kPFPMiner.html b/sphinx/_build/html/_modules/PAMI/periodicFrequentPattern/topk/kPFPMiner/kPFPMiner.html new file mode 100644 index 000000000..1f3c5340a --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/periodicFrequentPattern/topk/kPFPMiner/kPFPMiner.html @@ -0,0 +1,615 @@ + + + + + + PAMI.periodicFrequentPattern.topk.kPFPMiner.kPFPMiner — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +

Source code for PAMI.periodicFrequentPattern.topk.kPFPMiner.kPFPMiner

+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+
+#             from PAMI.periodicFrequentPattern.topk.kPFPMiner import kPFPMiner as alg
+#
+#             obj = alg.kPFPMiner(iFile, k)
+#
+#             obj.startMine()
+#
+#             periodicFrequentPatterns = obj.getPatterns()
+#
+#             print("Total number of top-k Periodic Frequent Patterns:", len(periodicFrequentPatterns))
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+__copyright__ = """
+ Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+import pandas as pd
+from deprecated import deprecated
+
+from PAMI.periodicFrequentPattern.topk.kPFPMiner import abstract as _ab
+
+
+
+[docs] +class kPFPMiner(_ab._periodicFrequentPatterns): + """ + :Description: Top - K is and algorithm to discover top periodic-frequent patterns in a temporal database. + + :Reference: Likhitha, P., Ravikumar, P., Kiran, R.U., Watanobe, Y. (2022). + Discovering Top-k Periodic-Frequent Patterns in Very Large Temporal Databases. Big Data Analytics. + BDA 2022. Lecture Notes in Computer Science, vol 13773. Springer, Cham. https://doi.org/10.1007/978-3-031-24094-2_14 + + :param iFile: str : + Name of the Input file to mine complete set of periodic frequent pattern's + :param oFile: str : + Name of the output file to store complete set of periodic frequent pattern's + + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + :Attributes: + + iFile : str + Input file name or path of the input file + k: int + User specified counte of top-k periodic frequent patterns + sep : str + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space or \t. + However, the users can override their default separator. + oFile : str + Name of the output file or the path of the output file + startTime:float + To record the start time of the mining process + endTime:float + To record the completion time of the mining process + finalPatterns: dict + Storing the complete set of patterns in a dictionary variable + memoryUSS : float + To store the total amount of USS memory consumed by the program + memoryRSS : float + To store the total amount of RSS memory consumed by the program + + :Methods: + + startMine() + Mining process will start from here + getPatterns() + Complete set of patterns will be retrieved with this function + savePatterns(oFile) + Complete set of frequent patterns will be loaded in to a output file + getPatternsAsDataFrame() + Complete set of frequent patterns will be loaded in to a dataframe + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function + creatingItemSets() + Scans the dataset or dataframes and stores in list format + frequentOneItem() + Generates one frequent patterns + eclatGeneration(candidateList) + It will generate the combinations of frequent items + generateFrequentPatterns(tidList) + It will generate the combinations of frequent items from a list of items + + **Executing the code on terminal:** + ------------------------------------------ + .. code-block:: console + + + Format: + + + (.venv) $ python3 kPFPMiner.py <inputFile> <outputFile> <k> + + Examples : + + (.venv) $ python3 kPFPMiner.py sampleDB.txt patterns.txt 10 + + + **Sample run of the importing code: + -------------------------------------- + .. 
code-block:: python + + import PAMI.periodicFrequentPattern.kPFPMiner as alg + + obj = alg.kPFPMiner(iFile, k) + + obj.startMine() + + periodicFrequentPatterns = obj.getPatterns() + + print("Total number of top-k Periodic Frequent Patterns:", len(periodicFrequentPatterns)) + + obj.save(oFile) + + Df = obj.getPatternInDataFrame() + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + **Credits:** + -------------- + The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran. + + """ + + _startTime = float() + _endTime = float() + _k = int() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _sep = " " + _memoryUSS = float() + _memoryRSS = float() + _Database = [] + _tidList = {} + lno = int() + _maximum = int() + + def _creatingItemSets(self): + """ + Storing the complete transactions of the database/input file in a database variable + """ + + self._Database = [] + if isinstance(self._iFile, _ab._pd.DataFrame): + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'Transactions' in i: + self._Database = self._iFile['Transactions'].tolist() + + # print(self.Database) + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line.strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + except IOError: + print("File Not Found") + quit() + +
+[docs]
+    def getPer_Sup(self, tids):
+        """
+        Computes the periodicity (maximum gap) of a tid-list.
+
+        :param tids: timestamps at which the pattern appears
+        :return: the maximal period of the pattern
+        """
+        tids.sort()
+        cur = 0
+        per = list()
+        for i in range(len(tids) - 1):
+            j = i + 1
+            per.append(tids[j] - cur)
+            cur = tids[j]
+        per.append(self.lno - cur)
+        return max(per)
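+    # Hedged example (not part of the original source): with self.lno = 6,
+    #     obj.getPer_Sup([1, 3, 4, 6])   # -> 3
+    # note that the scan starts from the second timestamp, so the first gap
+    # recorded is tids[1] - 0 and the last one is self.lno - tids[-1].
+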
+ + + def _frequentOneItem(self): + """ + Generating one frequent patterns + """ + self._mapSupport = {} + self._tidList = {} + n = 0 + for line in self._Database: + self.lno += 1 + n = int(line[0]) + for i in range(1, len(line)): + si = line[i] + if self._mapSupport.get(si) is None: + self._mapSupport[si] = [1, abs(0 - n), n] + self._tidList[si] = [n] + else: + self._mapSupport[si][0] += 1 + self._mapSupport[si][1] = max(self._mapSupport[si][1], abs(n - self._mapSupport[si][2])) + self._mapSupport[si][2] = n + self._tidList[si].append(n) + for x, y in self._mapSupport.items(): + self._mapSupport[x][1] = max(self._mapSupport[x][1], abs(n - self._mapSupport[x][2])) + plist = [key for key, value in sorted(self._mapSupport.items(), key=lambda x: x[1], reverse=True)] + for i in plist: + if len(self._finalPatterns) >= self._k: + break + else: + self._finalPatterns[i] = self._mapSupport[i][1] + self._maximum = max([self._finalPatterns[i] for i in self._finalPatterns.keys()]) + plist = list(self._finalPatterns.keys()) + return plist + + + def _save(self, prefix, suffix, tidSetI): + """Saves the patterns that satisfy the periodic frequent property. + + :param prefix: the prefix of a pattern + :type prefix: list + :param suffix: the suffix of a patterns + :type suffix: list + :param tidSetI: the timestamp of a patterns + :type tidSetI: list + """ + + if prefix is None: + prefix = suffix + else: + prefix = prefix + suffix + val = self.getPer_Sup(tidSetI) + sample = str() + for i in prefix: + sample = sample + i + " " + if len(self._finalPatterns) < self._k: + if val < self._maximum: + self._finalPatterns[sample] = val + self._finalPatterns = {k: v for k, v in sorted(self._finalPatterns.items(), key=lambda item: item[1], reverse=True)} + self._maximum = max([i for i in self._finalPatterns.values()]) + else: + for x, y in sorted(self._finalPatterns.items(), key=lambda x: x[1], reverse=True): + if val < y: + del self._finalPatterns[x] + self._finalPatterns[sample] = val + self._finalPatterns = {k: v for k, v in + sorted(self._finalPatterns.items(), key=lambda item: item[1], + reverse=True)} + self._maximum = max([i for i in self._finalPatterns.values()]) + return + + def _Generation(self, prefix, itemSets, tidSets): + """Equivalence class is followed and checks for the patterns generated for periodic-frequent patterns. 
+ + :param prefix: main equivalence prefix + :type prefix: periodic-frequent item or pattern + :param itemSets: patterns which are items combined with prefix and satisfying the periodicity and frequent with their timestamps + :type itemSets: list + :param tidSets: timestamps of the items in the argument itemSets + :type tidSets: list + + """ + if len(itemSets) == 1: + i = itemSets[0] + tidI = tidSets[0] + self._save(prefix, [i], tidI) + return + for i in range(len(itemSets)): + itemI = itemSets[i] + if itemI is None: + continue + tidSetI = tidSets[i] + classItemSets = [] + classTidSets = [] + itemSetX = [itemI] + for j in range(i + 1, len(itemSets)): + itemJ = itemSets[j] + tidSetJ = tidSets[j] + y = list(set(tidSetI).intersection(tidSetJ)) + if self.getPer_Sup(y) <= self._maximum: + classItemSets.append(itemJ) + classTidSets.append(y) + newPrefix = list(set(itemSetX)) + prefix + self._Generation(newPrefix, classItemSets, classTidSets) + self._save(prefix, list(set(itemSetX)), tidSetI) + + def _convert(self, value): + """ + to convert the type of user specified minSup value + + :param value: user specified minSup value + :return: converted type + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self._Database) * value) + if type(value) is str: + if '.' in value: + value = float(value) + value = ((len(self._Database)) * value) + else: + value = int(value) + return value + +
+[docs] + def startMine(self): + """ + Main function of the program + + """ + self._startTime = _ab._time.time() + if self._iFile is None: + raise Exception("Please enter the file path or file name:") + if self._k is None: + raise Exception("Please enter the Minimum Support") + self._creatingItemSets() + self._k = self._convert(self._k) + plist = self._frequentOneItem() + for i in range(len(plist)): + itemI = plist[i] + tidSetI = self._tidList[itemI] + itemSetX = [itemI] + itemSets = [] + tidSets = [] + for j in range(i + 1, len(plist)): + itemJ = plist[j] + tidSetJ = self._tidList[itemJ] + y1 = list(set(tidSetI).intersection(tidSetJ)) + if self.getPer_Sup(y1) <= self._maximum: + itemSets.append(itemJ) + tidSets.append(y1) + self._Generation(itemSetX, itemSets, tidSets) + print("kPFPMiner has successfully generated top-k frequent patterns") + self._endTime = _ab._time.time() + self._memoryUSS = float() + self._memoryRSS = float() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss
+ + +
+[docs] + def getMemoryUSS(self): + """Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self): + """Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self): + """Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self): + """Storing final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataFrame = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a, b]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'periodicity']) + return dataFrame
+ + +
+[docs] + def save(self, outFile): + """Complete set of frequent patterns will be loaded in to a output file + + :param outFile: name of the output file + + :type outFile: file + """ + self._oFile = outFile + writer = open(self._oFile, 'w+') + for x, y in self._finalPatterns.items(): + patternsAndSupport = x + ":" + str(y) + writer.write("%s \n" % patternsAndSupport)
+ + +
+[docs] + def getPatterns(self): + """ Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs]
+    def printResults(self):
+        print("Total number of Top-k Periodic Frequent Patterns:", len(self.getPatterns()))
+        print("Total Memory in USS:", self.getMemoryUSS())
+        print("Total Memory in RSS", self.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_ab._sys.argv) == 4 or len(_ab._sys.argv) == 5:
+        if len(_ab._sys.argv) == 5:
+            _ap = kPFPMiner(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4])
+        if len(_ab._sys.argv) == 4:
+            _ap = kPFPMiner(_ab._sys.argv[1], _ab._sys.argv[3])
+        _ap.startMine()
+        _Patterns = _ap.getPatterns()
+        print("Total number of top-k periodic frequent patterns:", len(_Patterns))
+        _ap.save(_ab._sys.argv[2])
+        _memUSS = _ap.getMemoryUSS()
+        print("Total Memory in USS:", _memUSS)
+        _memRSS = _ap.getMemoryRSS()
+        print("Total Memory in RSS", _memRSS)
+        _run = _ap.getRuntime()
+        print("Total ExecutionTime in seconds:", _run)
+    else:
+        print("Error! The number of input parameters do not match the total number of parameters provided")
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/recurringPattern/basic/RPGrowth.html b/sphinx/_build/html/_modules/PAMI/recurringPattern/basic/RPGrowth.html new file mode 100644 index 000000000..867f6599a --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/recurringPattern/basic/RPGrowth.html @@ -0,0 +1,926 @@ + + + + + + PAMI.recurringPattern.basic.RPGrowth — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +

Source code for PAMI.recurringPattern.basic.RPGrowth

+
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#
+#             from PAMI.recurringPattern.basic import RPGrowth as alg
+#
+#             obj = alg.RPGrowth(iFile, maxPer, minPS, minRec)
+#
+#             obj.startMine()
+#
+#             periodicFrequentPatterns = obj.getPatterns()
+#
+#             print("Total number of Periodic Frequent Patterns:", len(periodicFrequentPatterns))
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+__copyright__ = """
+ Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+from PAMI.recurringPattern.basic import abstract as _ab
+import pandas as pd
+from deprecated import deprecated
+
+_maxPer = float()
+_minPS = float()
+_minRec = float()
+_lno = int()
+
+
+class _Node(object):
+    """
+        A class used to represent a node of the recurring-pattern tree
+
+        :Attributes:
+
+            item : int or None
+                Storing item of a node
+            timeStamps : list
+                To maintain the timestamps of a database at the end of the branch
+            parent : node
+                To maintain the parent of every node
+            children : list
+                To maintain the children of a node
+
+        :Methods:
+
+            addChild(itemName)
+                Storing the children to their respective parent nodes
+        """
+
+    def __init__(self, item, children):
+        """ Initializing the Node class
+
+        :param item: Storing the item of a node
+        :type item: int or None
+        :param children: To maintain the children of a node
+        :type children: dict
+        """
+
+        self.item = item
+        self.children = children
+        self.parent = None
+        self.timeStamps = []
+
+    def addChild(self, node):
+        """
+        To add the children to a node
+
+        :param node: parent node in the tree
+        """
+
+        self.children[node.item] = node
+        node.parent = self
+
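+    # Illustrative note (not in the original source): parent.addChild(child)
+    # registers child under parent.children[child.item] and sets
+    # child.parent = parent, so a branch can be walked in both directions.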
+
+class _Tree(object):
+    """
+        A class used to represent the recurring-pattern tree structure
+
+        :Attributes:
+
+            root : Node
+                Represents the root node of the tree
+            summaries : dictionary
+                Storing the nodes with same item name
+            info : dictionary
+                Stores the support of the items
+
+        :Methods:
+
+            addTransaction(transaction, tid)
+                Adds a transaction as a branch of the recurring-pattern tree
+            getConditionalPatterns(Node)
+                Generates the conditional patterns from the tree for a specific node
+            conditionalDatabases(conditionalPatterns, conditionalTimeStamps)
+                Takes the prefix paths of a node with their timestamps, prunes the
+                non-recurring items, and returns the updated conditional transactions
+            removeNode(nodeValue)
+                Removes a node from the tree after generating all the patterns for it
+            generatePatterns(prefix)
+                Starts from the root node of the tree and mines the recurring patterns
+
+        """
+
+    def __init__(self):
+        self.root = _Node(None, {})
+        self.summaries = {}
+        self.info = {}
+
+    def addTransaction(self, transaction, tid):
+        """
+        Adding a transaction into the tree
+
+        :param transaction: a single transaction of the database
+        :type transaction: list
+        :param tid: the timestamps of the transaction
+        :type tid: list
+        :return: None; the tree is updated in place
+        """
+
+        currentNode = self.root
+        for i in range(len(transaction)):
+            if transaction[i] not in currentNode.children:
+                newNode = _Node(transaction[i], {})
+                currentNode.addChild(newNode)
+                if transaction[i] in self.summaries:
+                    self.summaries[transaction[i]].append(newNode)
+                else:
+                    self.summaries[transaction[i]] = [newNode]
+                currentNode = newNode
+            else:
+                currentNode = currentNode.children[transaction[i]]
+        currentNode.timeStamps = currentNode.timeStamps + tid
+
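+    # Hedged illustration (not in the original source): transactions sharing a
+    # prefix share a branch, and only the last node of each inserted
+    # transaction collects its timestamps:
+    #     t = _Tree()
+    #     t.addTransaction(['a', 'b'], [1])
+    #     t.addTransaction(['a', 'b', 'c'], [5])
+    #     # root -> a -> b (timeStamps=[1]) -> c (timeStamps=[5])
+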
+    def getConditionalPatterns(self, alpha):
+        """
+        Generates all the conditional patterns of a respective node
+
+        :param alpha: To represent a Node in the tree
+        :type alpha: Node
+        :return: A tuple consisting of finalPatterns, conditional pattern base and information
+        """
+        finalPatterns = []
+        finalSets = []
+        for i in self.summaries[alpha]:
+            set1 = i.timeStamps
+            set2 = []
+            while i.parent.item is not None:
+                set2.append(i.parent.item)
+                i = i.parent
+            if len(set2) > 0:
+                set2.reverse()
+                finalPatterns.append(set2)
+                finalSets.append(set1)
+        finalPatterns, finalSets, info = self.conditionalDatabases(finalPatterns, finalSets)
+        return finalPatterns, finalSets, info
+
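+    # Hedged sketch: for the branch built above, getConditionalPatterns('c')
+    # collects the prefix path ['a', 'b'] together with c's timestamps [5]
+    # and then filters both through conditionalDatabases(...).
+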
+    @staticmethod
+    def generateTimeStamps(node):
+        """
+        To get the timestamps of a node
+
+        :param node: A node in the tree
+        :return: Timestamps of a node
+        """
+
+        finalTimeStamps = node.timeStamps
+        return finalTimeStamps
+
+    def removeNode(self, nodeValue):
+        """
+        Removing a node from the tree
+
+        :param nodeValue: the item whose nodes are to be removed from the tree
+        :type nodeValue: node
+        :return: None; the timestamps of each removed node are pushed up to its parent
+        """
+
+        for i in self.summaries[nodeValue]:
+            i.parent.timeStamps = i.parent.timeStamps + i.timeStamps
+            del i.parent.children[nodeValue]
+
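+    # Illustrative note (not in the original source): removing the nodes of an
+    # item first pushes their timestamps up to the parents, so later
+    # projections of the parents still account for every occurrence that ran
+    # through the removed nodes.
+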
+    def getTimeStamps(self, alpha):
+        """
+        To get all the timestamps of the nodes which share the same item name
+
+        :param alpha: an item in the tree
+        :return: the combined timestamps of all nodes holding that item
+        """
+        temporary = []
+        for i in self.summaries[alpha]:
+            temporary += i.timeStamps
+        return temporary
+
+    @staticmethod
+    def getSupportAndPeriod(timeStamps):
+        """
+        To calculate the recurrence and support
+
+        :param timeStamps: Timestamps of an item set
+        :return: a list [recli, ps, sup] where recli holds the recurring intervals as [start, end, periodic support], ps is the total periodic support over all intervals, and sup is the overall support
+        """
+
+        global _maxPer,_minPS
+        timeStamps.sort()
+        cur = ' '   # sentinel values; overwritten below whenever timeStamps is non-empty
+        st = ' '
+        end = ' '
+        if len(timeStamps) > 0:
+            cur = timeStamps[0]
+            st = timeStamps[0]
+            end = timeStamps[0]
+        ps = 0      # total periodic support accumulated over all intervals
+        lps = 1     # support of the current periodic interval
+        recli = []  # recurring intervals, each as [start, end, support]
+        for i in range(1, len(timeStamps)):
+            if abs(timeStamps[i] - cur) <= _maxPer:
+                lps += 1
+            else:
+                if lps >= _minPS:
+                    recli.append([st, end, lps])
+                    ps += lps
+                lps = 1
+                st = timeStamps[i]
+            cur = timeStamps[i]
+            end = cur
+        if lps >= _minPS:
+            recli.append([st, end, lps])
+            ps += lps
+        return [recli, ps, len(timeStamps)]
+
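+    # Hedged worked example (not part of the original source): with
+    # _maxPer = 2 and _minPS = 3,
+    #     _Tree.getSupportAndPeriod([1, 2, 3, 10, 11, 12])
+    # returns [[[1, 3, 3], [10, 12, 3]], 6, 6]: two recurring intervals with
+    # periodic support 3 each, total periodic support 6 and overall support 6.
+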
+    def conditionalDatabases(self, conditionalPatterns, conditionalTimeStamps):
+        """
+        It generates the conditional patterns with periodic-frequent items
+
+        :param conditionalPatterns: conditionalPatterns generated from conditionPattern method of a respective node
+        :type conditionalPatterns: list
+        :param conditionalTimeStamps: Represents the timestamps of a conditional patterns of a node
+        :type conditionalTimeStamps: list
+        :returns: the conditional transactions with non-recurring items removed, their timestamps, and the surviving items' information
+        """
+
+        global _maxPer, _minPS, _minRec
+        pat = []
+        timeStamps = []
+        data1 = {}
+        for i in range(len(conditionalPatterns)):
+            for j in conditionalPatterns[i]:
+                if j in data1:
+                    data1[j] = data1[j] + conditionalTimeStamps[i]
+                else:
+                    data1[j] = conditionalTimeStamps[i]
+        updatedDictionary = {}
+        for m in data1:
+            updatedDictionary[m] = self.getSupportAndPeriod(data1[m])
+        updatedDictionary = {k: [v[0], v[2]] for k, v in updatedDictionary.items() if v[1] >= _minPS * _minRec}
+        count = 0
+        for p in conditionalPatterns:
+            p1 = [v for v in p if v in updatedDictionary]
+            trans = sorted(p1, key=lambda x: (updatedDictionary.get(x)[1], -x), reverse=True)
+            if len(trans) > 0:
+                pat.append(trans)
+                timeStamps.append(conditionalTimeStamps[count])
+            count += 1
+        return pat, timeStamps, updatedDictionary
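+    # Note (added for exposition): an item survives into the conditional database
+    # only when its summed periodic support v[1] is at least _minPS * _minRec,
+    # i.e. when it could still contribute _minRec recurring intervals of at least
+    # _minPS occurrences each; the same bound prunes single items in _OneItems().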
+
+    def generatePatterns(self, prefix):
+        """
+        Generates the patterns
+
+        :param prefix: Forms the combination of items
+        :type prefix: list
+        :returns: yields patterns with their recurrence and support
+        """
+        global _minRec
+        for i in sorted(self.summaries, key=lambda x: (self.info.get(x)[1], -x)):
+            pattern = prefix[:]
+            pattern.append(i)
+            if len(self.info.get(i)[0]) >= _minRec:
+                yield pattern, self.info[i]
+            patterns, timeStamps, info = self.getConditionalPatterns(i)
+            conditionalTree = _Tree()
+            conditionalTree.info = info.copy()
+            for pat in range(len(patterns)):
+                conditionalTree.addTransaction(patterns[pat], timeStamps[pat])
+            if len(patterns) > 0:
+                for q in conditionalTree.generatePatterns(pattern):
+                    yield q
+            self.removeNode(i)
+
+
+
+[docs] +class RPGrowth(_ab._recurringPatterns): + """ + :Description: RPGrowth is one of the fundamental algorithms to discover recurring patterns in a transactional database. + + + :Reference: R. Uday Kiran, Haichuan Shang, Masashi Toyoda and Masaru Kitsuregawa, Discovering Recurring Patterns in Time Series, https://www.tkl.iis.u-tokyo.ac.jp/new/uploads/publication_file/file/693/Paper%2023.pdf + + :param iFile: str : + Name of the Input file to mine complete set of Recurring patterns + :param oFile: str : + Name of the output file to store complete set of Recurring patterns + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override their default separator. + :param minPS: int or float or str : + The minimum periodic support: the minimum number of consecutive periodic occurrences an interval must contain to be counted as recurring. + :param maxPer: int or float or str : + The maximum periodicity: the largest allowed gap between two consecutive occurrences of a pattern inside a recurring interval. + :param minRec: int or str : + The minimum recurrence: the minimum number of recurring intervals a pattern must have to be reported. + + :Attributes: + + iFile : file + Name of the Input file or path of the input file + oFile : file + Name of the output file or path of the output file + maxPer : int or float or str + The user can specify maxPer either in count or proportion of database size. + If the program detects the data type of maxPer is integer, then it treats maxPer as expressed in count. + Otherwise, it will be treated as float. + Example: maxPer=10 will be treated as integer, while maxPer=10.0 will be treated as float + minPS : int or float or str + The user can specify minPS either in count or proportion of database size. + If the program detects the data type of minPS is integer, then it treats minPS as expressed in count. + Otherwise, it will be treated as float. + Example: minPS=10 will be treated as integer, while minPS=10.0 will be treated as float + minRec : int or float or str + The user has to specify minRec in count. + sep : str + This variable is used to distinguish items from one another in a transaction. The default separator is tab space or \t. + However, the users can override their default separator. 
+ memoryUSS : float + To store the total amount of USS memory consumed by the program + memoryRSS : float + To store the total amount of RSS memory consumed by the program + startTime : float + To record the start time of the mining process + endTime : float + To record the completion time of the mining process + Database : list + To store the transactions of a database in list + mapSupport : Dictionary + To maintain the information of item and their frequency + lno : int + To represent the total no of transactions + tree : class + To represent the Tree class + itemSetCount : int + To represent the total no of patterns + finalPatterns : dict + To store the complete patterns + + :Methods: + + mine() + Mining process will start from here (preferred over the deprecated startMine()) + startMine() + Mining process will start from here + getPatterns() + Complete set of patterns will be retrieved with this function + save(oFile) + Complete set of recurring patterns will be loaded in to an output file + getPatternsAsDataFrame() + Complete set of recurring patterns will be loaded in to a dataframe + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function + creatingItemSets(fileName) + Scans the dataset and stores it in a list format + OneItems() + Extracts the possible recurring items of size one from the database + updateDatabases() + Updates the database by removing non-recurring items and sorting the transactions by decreasing item support + buildTree() + After updating the Database, remaining items will be added into the tree by setting root node as null + convert() + To convert the user specified value + + **Methods to execute code on terminal** + ------------------------------------------- + .. code-block:: console + + + Format: + + (.venv) $ python3 RPGrowth.py <inputFile> <outputFile> <maxPer> <minPS> <minRec> + + Example usage: + + (.venv) $ python3 RPGrowth.py sampleTDB.txt patterns.txt 0.3 0.4 2 + + + .. note:: maxPer and minPS will be considered in percentage of database transactions + + **Importing this algorithm into a python program** + -------------------------------------------------------- + .. code-block:: python + + from PAMI.periodicFrequentPattern.recurring import RPGrowth as alg + + obj = alg.RPGrowth(iFile, maxPer, minPS, minRec) + + obj.mine() + + recurringPatterns = obj.getPatterns() + + print("Total number of Recurring Patterns:", len(recurringPatterns)) + + obj.save(oFile) + + Df = obj.getPatternsAsDataFrame() + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + **Credits:** + ------------- + The complete program was written by C. Saideep under the supervision of Professor Rage Uday Kiran. 
+ + """ + _startTime = float() + _endTime = float() + _minPS = str() + _maxPer = float() + _minRec = str() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _sep = " " + _memoryUSS = float() + _memoryRSS = float() + _Database = [] + _rank = {} + _rankedUp = {} + _lno = 0 + + def _creatingItemSets(self): + """ + Storing the complete values of the database/input file in a database variable + """ + + self._Database = [] + if isinstance(self._iFile, _ab._pd.DataFrame): + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'Transactions' in i: + self._Database = self._iFile['Transactions'].tolist() + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line.strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + except IOError: + print("File Not Found") + quit() + + def _OneItems(self): + """ + Calculates the maxRec and support of each item in the database and assign ranks to the items + by decreasing support and returns the RP-list + + :return: return the RP-list + """ + #global rank + data = {} + for tr in self._Database: + for i in range(1, len(tr)): + if tr[i] not in data: + data[tr[i]] = [[], int(tr[0]), int(tr[0]), 1, 0, 1] + else: + lp = int(tr[0]) - data[tr[i]][2] + if lp <= self._maxPer: + data[tr[i]][3] += 1 + + else: + if data[tr[i]][3] >= self._minPS: + data[tr[i]][0].append([data[tr[i]][1], data[tr[i]][2], data[tr[i]][3]]) + data[tr[i]][4] += data[tr[i]][3] + data[tr[i]][3] = 1 + data[tr[i]][1] = int(tr[0]) + data[tr[i]][2] = int(tr[0]) + data[tr[i]][5] += 1 + # print(data) + + for ri in data: + if data[ri][3] >= self._minPS: + data[ri][0].append([data[ri][1], data[ri][2], data[ri][3]]) + data[ri][4] += data[ri][3] + data = {k: [v[0], v[5]] for k, v in data.items() if v[4] >= (self._minPS*self._minRec)} + genList = [k for k, v in sorted(data.items(), key=lambda x: (x[1][1], x[0]), reverse=True)] + self._rank = dict([(index, item) for (item, index) in enumerate(genList)]) + return data, genList + + def _updateDatabases(self, dict1): + """ + Remove the items which does not satisfy maxRec from database and updates the database with rank of items + + :param dict1: Recurring items with support and recurrence + :type dict1: dictionary + :return: Sorted and updated transactions + """ + list1 = [] + for tr in self._Database: + list2 = [int(tr[0])] + for i in range(1, len(tr)): + if tr[i] in dict1: + list2.append(self._rank[tr[i]]) + if len(list2) >= 2: + basket = list2[1:] + basket.sort() + list2[1:] = basket[0:] + list1.append(list2) + # print(list2) + return list1 + + @staticmethod + def _buildTree(data, info): + """ + It takes the database and construct the main tree by setting root node as a null + + :param data: it represents the one items in database + :type data: list + :param info: it represents the support and recurrence of each item + :type info: dictionary + :return: returns root node of tree + """ + + rootNode = _Tree() + rootNode.info = info.copy() + for i in range(len(data)): + set1 = [data[i][0]] + rootNode.addTransaction(data[i][1:], set1) + return rootNode + + def _savePeriodic(self, itemSet): + """ + To convert the ranks of items in to their 
original item names + + :param itemSet: recurring pattern. + :return: recurring pattern with original item names + """ + t1 = str() + for i in itemSet: + t1 = t1 + self._rankedUp[i] + "\t" + return t1 + + def _convert(self, value): + """ + To convert the given user specified value + + :param value: user specified value + :return: converted value + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self._Database) * value) + if type(value) is str: + if '.' in value: + value = float(value) + value = (len(self._Database) * value) + else: + value = int(value) + return value + +
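+    # Illustrative note (added for exposition): with a database of 100 transactions,
+    # _convert(10) returns the count 10, _convert(0.1) returns 100 * 0.1 = 10.0
+    # (a proportion of the database size), _convert("0.1") is parsed as a float and
+    # likewise scaled to 10.0, and _convert("10") stays the absolute count 10.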
+[docs] + @deprecated("It is recommended to use mine() instead of startMine() for mining process") + def startMine(self): + """ + Mining process will start from this function + """ + global _minPS, _minRec, _maxPer, _lno + self._startTime = _ab._time.time() + if self._iFile is None: + raise Exception("Please enter the file path or file name:") + self._creatingItemSets() + self._minPS = self._convert(self._minPS) + self._maxPer = self._convert(self._maxPer) + self._minRec = int(self._minRec) + self._finalPatterns = {} + _maxPer, _minPS, _minRec, _lno = self._maxPer, self._minPS, self._minRec, len(self._Database) + generatedItems, pfList = self._OneItems() + updatedDatabases = self._updateDatabases(generatedItems) + for x, y in self._rank.items(): + self._rankedUp[y] = x + info = {self._rank[k]: v for k, v in generatedItems.items()} + Tree = self._buildTree(updatedDatabases, info) + patterns = Tree.generatePatterns([]) + for i in patterns: + sample = self._savePeriodic(i[0]) + self._finalPatterns[sample] = i[1] + self._endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = float() + self._memoryRSS = float() + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss + print("Recurring patterns were generated successfully using RPGrowth algorithm ")
+ + +
+[docs] + def mine(self): + """ + Mining process will start from this function + """ + global _minPS, _minRec, _maxPer, _lno + self._startTime = _ab._time.time() + if self._iFile is None: + raise Exception("Please enter the file path or file name:") + self._creatingItemSets() + self._minPS = self._convert(self._minPS) + self._maxPer = self._convert(self._maxPer) + self._minRec = int(self._minRec) + self._finalPatterns = {} + _maxPer, _minPS, _minRec, _lno = self._maxPer, self._minPS, self._minRec, len(self._Database) + generatedItems, pfList = self._OneItems() + updatedDatabases = self._updateDatabases(generatedItems) + for x, y in self._rank.items(): + self._rankedUp[y] = x + info = {self._rank[k]: v for k, v in generatedItems.items()} + Tree = self._buildTree(updatedDatabases, info) + patterns = Tree.generatePatterns([]) + for i in patterns: + sample = self._savePeriodic(i[0]) + self._finalPatterns[sample] = i[1] + self._endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss + print("Recurring patterns were generated successfully using RPGrowth algorithm")
+ + +
+[docs] + def getMemoryUSS(self): + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self): + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self): + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self): + """ + Storing final recurring patterns in a dataframe + + :return: returning recurring patterns in a dataframe + :rtype: pd.DataFrame + """ + + data = [] + for a, b in self._finalPatterns.items(): + s = str() + for i in a: + s = s + i + ' ' + str1 = '{' + for z in b[0]: + str1 += '{' + str([z[0], z[1]]) + ' : ' + str(z[2]) + '}' + str1 += '}' + data.append([s.replace('\t', ' '), b[1], len(b[0]), str1]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support', 'Recurrence', 'Intervals']) + return dataFrame
+ + +
+[docs] + def save(self, outFile): + """ + Complete set of periodic-frequent patterns will be loaded in to a output file + + :param outFile: name of the output file. + :type outFile: file + """ + self._oFile = outFile + writer = open(self._oFile, 'w+') + for x, y in self._finalPatterns.items(): + s = str() + for i in x: + s = s + i + '\t' + str1 = '{' + for z in y[0]: + str1 += '{'+str([z[0], z[1]])+' : ' + str(z[2]) + '}' + str1 += '}' + s1 = s.strip() + ":" + str(y[1]) + ":" + str(len(y[0])) + ":" + str1 + writer.write("%s \n" % s1) + writer.close()
+ + +
+[docs] + def getPatterns(self): + """ + Function to send the set of periodic-frequent patterns after completion of the mining process + + :return: returning periodic-frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs] + def printResults(self): + print("Total number of Recurring Patterns:", len(self.getPatterns())) + print("Total Memory in USS:", self.getMemoryUSS()) + print("Total Memory in RSS", self.getMemoryRSS()) + print("Total ExecutionTime in seconds:", self.getRuntime())
+
+ + + +if __name__ == "__main__": + _ap = str() + if len(_ab._sys.argv) == 6 or len(_ab._sys.argv) == 7: + if len(_ab._sys.argv) == 7: + _ap = RPGrowth(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5], _ab._sys.argv[6]) + if len(_ab._sys.argv) == 6: + _ap = RPGrowth(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5]) + _ap.startMine() + print("Total number of Patterns:", len(_ap.getPatterns())) + _ap.save(_ab._sys.argv[2]) + print("Total Memory in USS:", _ap.getMemoryUSS()) + print("Total Memory in RSS", _ap.getMemoryRSS()) + print("Total ExecutionTime in seconds:", _ap.getRuntime()) + else: + print("Error! The number of input parameters does not match the total number of parameters provided") +
\ No newline at end of file
diff --git a/sphinx/_build/html/_modules/PAMI/relativeFrequentPattern/basic/RSFPGrowth.html b/sphinx/_build/html/_modules/PAMI/relativeFrequentPattern/basic/RSFPGrowth.html
new file mode 100644
index 000000000..48653cf37
--- /dev/null
+++ b/sphinx/_build/html/_modules/PAMI/relativeFrequentPattern/basic/RSFPGrowth.html
@@ -0,0 +1,928 @@
+PAMI.relativeFrequentPattern.basic.RSFPGrowth — PAMI 2024.04.23 documentation

Source code for PAMI.relativeFrequentPattern.basic.RSFPGrowth

+# RSFPGrowth algorithm is used to find all items with relative support from given dataset
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#
+#             from PAMI.relativeFrequentPattern import RSFPGrowth as alg
+#
+#             obj = alg.RSFPGrowth(iFile, minSup, minRS)
+#
+#             obj.mine()
+#
+#             frequentPatterns = obj.getPatterns()
+#
+#             print("Total number of Frequent Patterns:", len(frequentPatterns))
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+__copyright__ = """
+ Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+from PAMI.relativeFrequentPattern.basic import abstract as _ab
+from typing import List, Dict, Tuple, Set, Union, Any, Generator
+import pandas as pd
+from deprecated import deprecated
+
+
+class _Node:
+    """
+    A class used to represent the node of frequent Pattern tree
+
+    :Attributes:
+
+        itemId: int
+            storing item of a node
+        counter: int
+            To maintain the support of node
+        parent: node
+            To maintain the parent of every node
+        child: list
+            To maintain the children of node
+        nodeLink : node
+            Points to the node with same itemId
+
+    :Methods:
+
+        getChild(itemName)
+            returns the node with same itemName from frequent Pattern tree
+    """
+
+    def __init__(self) -> None:
+        self.itemId = -1
+        self.counter = 1
+        self.parent = None
+        self.child = []
+        self.nodeLink = None
+
+    def getChild(self, itemName: int) -> Union[None, '_Node']:
+        """
+        Retrieving the child from the tree
+
+        :param itemName: name of the child
+        :type itemName: list
+        :return: returns the node with same itemName from frequentPatternTree
+        :rtype: None or Node
+
+        """
+        for i in self.child:
+            if i.itemId == itemName:
+                return i
+        return None
+
+
+class _Tree:
+    """
+    A class used to represent the frequentPatternGrowth tree structure
+
+    :Attributes:
+
+        headerList : list
+            storing the list of items in tree sorted in descending order of their supports
+        mapItemNodes : dictionary
+            storing the nodes with same item name
+        mapItemLastNodes : dictionary
+            representing the map that indicates the last node for each item
+        root : Node
+            representing the root Node in a tree
+
+    :Methods:
+
+        createHeaderList(items,minSup)
+            takes only the items whose supports are no less than minSup and keeps them in descending order of support
+        addTransaction(transaction)
+            creating transaction as a branch in frequentPatternTree
+        fixNodeLinks(item,newNode)
+            To create the link for nodes with same item
+        printTree(Node)
+            gives the details of node in frequentPatternGrowth tree
+        addPrefixPath(prefix,__mapSupportBeta,minSup)
+           It takes the items in the prefix pattern whose support is >= minSup and constructs a subtree
+    """
+
+    def __init__(self) -> None:
+        self.headerList = []
+        self.mapItemNodes = {}
+        self.mapItemLastNodes = {}
+        self.root = _Node()
+
+    def addTransaction(self, transaction: List[int]) -> None:
+        """
+        Adding transaction into tree
+
+        :param transaction: it represents the one transaction in database
+        :type transaction: list
+        :return: None
+        """
+
+        # This method takes a transaction as input and inserts it as a branch into the tree
+        current = self.root
+        for i in transaction:
+            child = current.getChild(i)
+            if not child:
+                newNode = _Node()
+                newNode.itemId = i
+                newNode.parent = current
+                current.child.append(newNode)
+                self.fixNodeLinks(i, newNode)
+                current = newNode
+            else:
+                child.counter += 1
+                current = child
+
+    def fixNodeLinks(self, item: int, newNode: '_Node') -> None:
+        """
+        Fixing node link for the newNode that inserted into frequentPatternTree
+
+        :param item: it represents the item of newNode
+        :type item: int
+        :param newNode: it represents the newNode that inserted in frequentPatternTree
+        :type newNode: Node
+        :return: None
+
+        """
+        if item in self.mapItemLastNodes.keys():
+            lastNode = self.mapItemLastNodes[item]
+            lastNode.nodeLink = newNode
+        self.mapItemLastNodes[item] = newNode
+        if item not in self.mapItemNodes.keys():
+            self.mapItemNodes[item] = newNode
+
+    def printTree(self, root: '_Node') -> None:
+        """
+        Print the details of Node in frequentPatternTree
+
+        :param root: it represents the Node in frequentPatternTree
+        :type root: Node
+        :return: None
+
+        """
+
+        # this method is used print the details of tree
+        if not root.child:
+            return
+        else:
+            for i in root.child:
+                print(i.itemId, i.counter, i.parent.itemId)
+                self.printTree(i)
+
+    def createHeaderList(self, __mapSupport: Dict[int, int], minSup: float) -> None:
+        """
+        To create the headerList
+
+        :param __mapSupport: it represents the items with their supports
+        :type __mapSupport: dictionary
+        :param minSup: it represents the minSup
+        :type minSup: float
+        :return: None
+        """
+        # the frequentPatternTree always maintains the header table to start the mining from leaf nodes
+        t1 = []
+        for x, y in __mapSupport.items():
+            if y >= minSup:
+                t1.append(x)
+        __itemSetBuffer = [k for k, v in sorted(__mapSupport.items(), key=lambda x: x[1], reverse=True)]
+        self.headerList = [i for i in __itemSetBuffer if i in t1]
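+    # Note (added for exposition): headerList keeps only the items meeting minSup,
+    # ordered by decreasing support, so the mining phase can walk
+    # reversed(self.headerList) and process the least-frequent qualifying item first.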
+
+    def addPrefixPath(self, prefix: List['_Node'], __mapSupportBeta: Dict[int, int], minSup: float) -> None:
+        """
+        To construct the conditional tree with prefix paths of a node in frequentPatternTree
+
+        :param prefix: it represents the prefix items of a Node
+        :type prefix: list
+        :param __mapSupportBeta: it represents the items with their supports
+        :type __mapSupportBeta: dictionary
+        :param minSup: to check whether the item meets minSup
+        :type minSup: float
+        """
+        # this method is used to add prefix paths in conditional trees of frequentPatternTree
+        pathCount = prefix[0].counter
+        current = self.root
+        prefix.reverse()
+        for i in range(0, len(prefix) - 1):
+            pathItem = prefix[i]
+            if __mapSupportBeta.get(pathItem.itemId) >= minSup:
+                child = current.getChild(pathItem.itemId)
+                if not child:
+                    newNode = _Node()
+                    newNode.itemId = pathItem.itemId
+                    newNode.parent = current
+                    newNode.counter = pathCount
+                    current.child.append(newNode)
+                    current = newNode
+                    self.fixNodeLinks(pathItem.itemId, newNode)
+                else:
+                    child.counter += pathCount
+                    current = child
+
+
+
+[docs] +class RSFPGrowth(_ab._frequentPatterns): + """ + :Description: Algorithm to find all items with relative support from given dataset + + :Reference: 'Towards Efficient Discovery of Frequent Patterns with Relative Support' R. Uday Kiran and + Masaru Kitsuregawa, http://comad.in/comad2012/pdf/kiran.pdf + + :param iFile: str : + Name of the Input file to mine complete set of Relative frequent patterns + :param oFile: str : + Name of the output file to store complete set of Relative frequent patterns + :param minSup: str: + Controls the minimum number of transactions in which a pattern must appear in the database. + :param minRS: float: + Controls the minimum relative support, i.e. the minimum allowed ratio between the support of a pattern and the support of its least frequent item. + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override their default separator. + + + :Attributes: + + iFile : file + Name of the Input file to mine complete set of frequent patterns + oFile : file + Name of the output file to store complete set of frequent patterns + memoryUSS : float + To store the total amount of USS memory consumed by the program + memoryRSS : float + To store the total amount of RSS memory consumed by the program + startTime : float + To record the start time of the mining process + endTime : float + To record the completion time of the mining process + minSup : float + The user given minSup + minRS : float + The user given minRS + Database : list + To store the transactions of a database in list + mapSupport : Dictionary + To maintain the information of item and their frequency + lno : int + it represents the total no of transactions + tree : class + it represents the Tree class + itemSetCount : int + it represents the total no of patterns + finalPatterns : dict + it represents to store the patterns + itemSetBuffer : list + it represents the items stored during mining + maxPatternLength : int + it represents the constraint for pattern length + + :Methods: + + startMine() + Mining process will start from here + getPatterns() + Complete set of patterns will be retrieved with this function + save(oFile) + Complete set of frequent patterns will be loaded in to an output file + getPatternsAsDataFrame() + Complete set of frequent patterns will be loaded in to a dataframe + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function + check(line) + To check the delimiter used in the user input file + creatingItemSets(fileName) + Scans the dataset or dataframes and stores in list format + frequentOneItem() + Extracts the one-frequent patterns from transactions + saveAllCombination(tempBuffer,s,position,prefix,prefixLength) + Forms all the combinations between prefix and tempBuffer lists with support(s) + saveItemSet(pattern,support) + Stores all the frequent patterns with their respective support + frequentPatternGrowthGenerate(frequentPatternTree,prefix,prefixLength,__mapSupport,minConf) + Mining the frequent patterns by forming conditional frequentPatternTrees for a particular prefix item. + __mapSupport represents the 1-length items with their respective support + + + **Methods to execute code on terminal** + ---------------------------------------------- + .. code-block:: console + + + Format: + + (.venv) $ python3 RSFPGrowth.py <inputFile> <outputFile> <minSup> <minRS> + + Example Usage : + + (.venv) $ python3 RSFPGrowth.py sampleDB.txt patterns.txt 0.23 0.2 + + + .. note:: minSup will be considered in percentage of database transactions, while minRS is a ratio between 0 and 1 + + + **Importing this algorithm into a python program** + ----------------------------------------------------- + .. code-block:: python + + from PAMI.relativeFrequentPattern import RSFPGrowth as alg + + obj = alg.RSFPGrowth(iFile, minSup, minRS) + + obj.mine() + + frequentPatterns = obj.getPatterns() + + print("Total number of Frequent Patterns:", len(frequentPatterns)) + + obj.save(oFile) + + Df = obj.getPatternsAsDataFrame() + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + **Credits:** + -------------- + The complete program was written by Sai Chitra.B under the supervision of Professor Rage Uday Kiran. + + """ + + __startTime = float() + __endTime = float() + _minSup = str() + _minRS = float() + __finalPatterns = {} + _iFile = " " + _oFile = " " + _sep = " " + __memoryUSS = float() + __memoryRSS = float() + __Database = [] + __mapSupport = {} + __lno = 0 + __tree = _Tree() + __itemSetBuffer = None + __fpNodeTempBuffer = [] + __itemSetCount = 0 + __maxPatternLength = 1000 + + def __init__(self, iFile: Union[str, pd.DataFrame], minSup: Union[int, float, str], minRS: float, sep: str='\t') -> None: + super().__init__(iFile, minSup, minRS, sep) + self.__finalPatterns = {} + + def __creatingItemSets(self) -> None: + """ + Storing the complete transactions of the __Database/input file in a __Database variable + :return: None + """ + self.__Database = [] + if isinstance(self._iFile, _ab._pd.DataFrame): + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'Transactions' in i: + self.__Database = self._iFile['Transactions'].tolist() + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self.__Database.append(temp) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self.__Database.append(temp) + except IOError: + print("File Not Found") + quit() + + def __frequentOneItem(self) -> None: + """ + Generating one-frequent itemsets + :return: None + """ + self.__mapSupport = {} + for i in self.__Database: + for j in i: + if j not in self.__mapSupport: + self.__mapSupport[j] = 1 + else: + self.__mapSupport[j] += 1 + + def __saveItemSet(self, prefix: List[int], prefixLength: int, support: int, ratio: float) -> None: + """ + To save the frequent patterns mined from the frequentPatternTree + + :param prefix: the frequent pattern + :type prefix: list + :param prefixLength: the length of a frequent pattern + :type prefixLength: int + :param support: the support of a pattern + :type support: int + :param ratio: the relative support of a pattern + :type ratio: float + :return: None + """ + + sample = [] + for i in range(prefixLength): + sample.append(prefix[i]) + self.__itemSetCount += 1 + self.__finalPatterns[tuple(sample)] = str(support) + " : " + str(ratio) + + def __saveAllCombinations(self, tempBuffer: 
List['_Node'], s: int, position: int, prefix: List[int], prefixLength: int) -> None: + """ + Generating all the combinations for items in single branch in frequentPatternTree + + :param tempBuffer: items in a list. + :type tempBuffer: list + :param s: support at leaf node of a branch + :param position: the length of a tempBuffer + :type position: int + :param prefix: it represents the list of leaf node + :type prefix: list + :param prefixLength: the length of prefix + :type prefixLength: int + + """ + max1 = 1 << position + for i in range(1, max1): + newPrefixLength = prefixLength + for j in range(position): + isSet = i & (1 << j) + if isSet > 0: + prefix.insert(newPrefixLength, tempBuffer[j].itemId) + newPrefixLength += 1 + ratio = s / self.__mapSupport[self.__getMinItem(prefix, newPrefixLength)] + if ratio >= self._minRS: + self.__saveItemSet(prefix, newPrefixLength, s, ratio) + + def __frequentPatternGrowthGenerate(self, frequentPatternTree: '_Tree', prefix: List[int], prefixLength: int, __mapSupport: Dict[int, int], minConf: float) -> None: + """ + Mining the fp tree + + :param frequentPatternTree: it represents the frequentPatternTree + :type frequentPatternTree: class Tree + :param prefix: it represents an empty list and store the patterns that are mined + :type prefix: list + :param prefixLength: the length of prefix + :type prefixLength: int + :param __mapSupport : it represents the support of item + :type __mapSupport : dictionary + """ + singlePath = True + position = 0 + s = 0 + if len(frequentPatternTree.root.child) > 1: + singlePath = False + else: + currentNode = frequentPatternTree.root.child[0] + while True: + if len(currentNode.child) > 1: + singlePath = False + break + self.__fpNodeTempBuffer.insert(position, currentNode) + s = currentNode.counter + position += 1 + if len(currentNode.child) == 0: + break + currentNode = currentNode.child[0] + if singlePath is True: + self.__saveAllCombinations(self.__fpNodeTempBuffer, s, position, prefix, prefixLength) + else: + for i in reversed(frequentPatternTree.headerList): + item = i + support = __mapSupport[i] + CminSup = max(self._minSup, support * self._minRS) + betaSupport = support + prefix.insert(prefixLength, item) + max1 = self.__getMinItem(prefix, prefixLength) + if self.__mapSupport[max1] > self.__mapSupport[item]: + max1 = item + ratio = support / self.__mapSupport[max1] + if ratio >= self._minRS: + self.__saveItemSet(prefix, prefixLength + 1, betaSupport, ratio) + if prefixLength + 1 < self.__maxPatternLength: + prefixPaths = [] + path = frequentPatternTree.mapItemNodes.get(item) + __mapSupportBeta = {} + while path is not None: + if path.parent.itemId != -1: + prefixPath = [path] + pathCount = path.counter + parent1 = path.parent + if __mapSupport.get(parent1.itemId) >= CminSup: + while parent1.itemId != -1: + mins = CminSup + if __mapSupport.get(parent1.itemId) >= mins: + prefixPath.append(parent1) + if __mapSupportBeta.get(parent1.itemId) is None: + __mapSupportBeta[parent1.itemId] = pathCount + else: + __mapSupportBeta[parent1.itemId] = __mapSupportBeta[ + parent1.itemId] + pathCount + parent1 = parent1.parent + else: + break + prefixPaths.append(prefixPath) + path = path.nodeLink + __treeBeta = _Tree() + for k in prefixPaths: + __treeBeta.addPrefixPath(k, __mapSupportBeta, self._minSup) + if len(__treeBeta.root.child) > 0: + __treeBeta.createHeaderList(__mapSupportBeta, self._minSup) + self.__frequentPatternGrowthGenerate(__treeBeta, prefix, prefixLength + 1, __mapSupportBeta, + minConf) + + def __convert(self, 
value: Union[int, float, str]) -> float: + """ + to convert the type of user specified __minSup value + + :param value: user specified __minSup value + :return: converted type + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self.__Database) * value) + if type(value) is str: + if '.' in value: + value = float(value) + value = (len(self.__Database) * value) + else: + value = int(value) + return value + +
+[docs] + @deprecated("It is recommended to use mine() instead of startMine() for mining process") + def startMine(self) -> None: + """ + Main program to start the operation + :return: None + """ + + self.__startTime = _ab._time.time() + if self._iFile is None: + raise Exception("Please enter the file path or file name:") + if self._minSup is None: + raise Exception("Please enter the Minimum Support") + self.__creatingItemSets() + self._minSup = self.__convert(self._minSup) + self._minRS = float(self._minRS) + self.__frequentOneItem() + self.__finalPatterns = {} + self.__mapSupport = {k: v for k, v in self.__mapSupport.items() if v >= self._minSup} + __itemSetBuffer = [k for k, v in sorted(self.__mapSupport.items(), key=lambda x: x[1], reverse=True)] + for i in self.__Database: + transaction = [] + for j in i: + if j in __itemSetBuffer: + transaction.append(j) + transaction.sort(key=lambda val: self.__mapSupport[val], reverse=True) + self.__tree.addTransaction(transaction) + self.__tree.createHeaderList(self.__mapSupport, self._minSup) + if len(self.__tree.headerList) > 0: + self.__itemSetBuffer = [] + self.__frequentPatternGrowthGenerate(self.__tree, self.__itemSetBuffer, 0, self.__mapSupport, self._minRS) + print("Relative support frequent patterns were generated successfully using RSFPGrowth algorithm") + self.__endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self.__memoryRSS = float() + self.__memoryUSS = float() + self.__memoryUSS = process.memory_full_info().uss + self.__memoryRSS = process.memory_info().rss
+ + +
+[docs] + def mine(self) -> None: + """ + Main program to start the operation + :return: None + """ + + self.__startTime = _ab._time.time() + if self._iFile is None: + raise Exception("Please enter the file path or file name:") + if self._minSup is None: + raise Exception("Please enter the Minimum Support") + self.__creatingItemSets() + self._minSup = self.__convert(self._minSup) + self._minRS = float(self._minRS) + self.__frequentOneItem() + self.__finalPatterns = {} + self.__mapSupport = {k: v for k, v in self.__mapSupport.items() if v >= self._minSup} + __itemSetBuffer = [k for k, v in sorted(self.__mapSupport.items(), key=lambda x: x[1], reverse=True)] + for i in self.__Database: + transaction = [] + for j in i: + if j in __itemSetBuffer: + transaction.append(j) + transaction.sort(key=lambda val: self.__mapSupport[val], reverse=True) + self.__tree.addTransaction(transaction) + self.__tree.createHeaderList(self.__mapSupport, self._minSup) + if len(self.__tree.headerList) > 0: + self.__itemSetBuffer = [] + self.__frequentPatternGrowthGenerate(self.__tree, self.__itemSetBuffer, 0, self.__mapSupport, self._minRS) + print("Relative support frequent patterns were generated successfully using RSFPGrowth algorithm") + self.__endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self.__memoryUSS = process.memory_full_info().uss + self.__memoryRSS = process.memory_info().rss
+ + +
+[docs] + def getMemoryUSS(self) -> float: + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self.__memoryUSS
+ + +
+[docs] + def getMemoryRSS(self) -> float: + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self.__memoryRSS
+ + + def __getMinItem(self, prefix: List[str], prefixLength: int) -> str: + """ + Returns the minItem from prefix + """ + minItem = prefix[0] + for i in range(prefixLength): + if self.__mapSupport[minItem] > self.__mapSupport[prefix[i]]: + minItem = prefix[i] + return minItem + +
+[docs] + def getRuntime(self) -> float: + """ + Calculating the total amount of runtime taken by the mining process + + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self.__endTime - self.__startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self) -> pd.DataFrame: + """ + Storing final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataframe = {} + data = [] + for a, b in self.__finalPatterns.items(): + pattern = str() + for i in a: + pattern = pattern + i + " " + data.append([pattern, b]) + dataframe = _ab._pd.DataFrame(data, columns=['Patterns', 'Support']) + return dataframe
+ + +
+[docs] + def save(self, outFile: str) -> None: + """ + Complete set of frequent patterns will be loaded in to an output file + + :param outFile: name of the output file. + :type outFile: file + :return: None + """ + self.__oFile = outFile + writer = open(self.__oFile, 'w+') + for x, y in self.__finalPatterns.items(): + pattern = str() + for i in x: + pattern = pattern + i + "\t" + s1 = pattern.strip() + ": " + str(y) + writer.write("%s \n" % s1) + writer.close()
+ + +
+[docs] + def getPatterns(self) -> Dict[str, str]: + """ + Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + :rtype: dict + """ + res = dict() + for x, y in self.__finalPatterns.items(): + pattern = str() + for i in x: + pattern = pattern + i + "\t" + s1 = str(y) + res[pattern] = s1 + return res
+ + +
+[docs] + def printResults(self) -> None: + """ + This function is used to print the results + :return: None + """ + print("Total number of Relative Frequent Patterns:", len(self.getPatterns())) + print("Total Memory in USS:", self.getMemoryUSS()) + print("Total Memory in RSS", self.getMemoryRSS()) + print("Total ExecutionTime in seconds:", self.getRuntime())
+
+ + + +if __name__ == "__main__": + _ap = str() + if len(_ab._sys.argv) == 5 or len(_ab._sys.argv) == 6: + if len(_ab._sys.argv) == 6: + _ap = RSFPGrowth(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5]) + if len(_ab._sys.argv) == 5: + _ap = RSFPGrowth(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4]) + _ap.startMine() + print("Total number of Frequent Patterns:", len(_ap.getPatterns())) + _ap.save(_ab._sys.argv[2]) + print("Total Memory in USS:", _ap.getMemoryUSS()) + print("Total Memory in RSS", _ap.getMemoryRSS()) + print("Total ExecutionTime in seconds:", _ap.getRuntime()) + else: + print("Error! The number of input parameters does not match the total number of parameters provided") +
\ No newline at end of file
diff --git a/sphinx/_build/html/_modules/PAMI/relativeHighUtilityPattern/basic/RHUIM.html b/sphinx/_build/html/_modules/PAMI/relativeHighUtilityPattern/basic/RHUIM.html
new file mode 100644
index 000000000..0ee10462b
--- /dev/null
+++ b/sphinx/_build/html/_modules/PAMI/relativeHighUtilityPattern/basic/RHUIM.html
@@ -0,0 +1,1012 @@
+PAMI.relativeHighUtilityPattern.basic.RHUIM — PAMI 2024.04.23 documentation

Source code for PAMI.relativeHighUtilityPattern.basic.RHUIM

+# RHUIM algorithm helps us to mine Relative High Utility itemSets from transactional databases.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#
+#             from PAMI.relativeHighUtilityPattern.basic import RHUIM as alg
+#
+#             obj = alg.RHUIM("input.txt", 35, 20)
+#
+#             obj.startMine()
+#
+#             frequentPatterns = obj.getPatterns()
+#
+#             print("Total number of Frequent Patterns:", len(frequentPatterns))
+#
+#             obj.savePatterns(oFile)
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+
+__copyright__ = """
+ Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+import pandas as pd
+from deprecated import deprecated
+from PAMI.relativeHighUtilityPattern.basic import abstract as _ab
+
+
+class _Transaction:
+    """
+    A class to store Transaction of a database
+
+    :Attributes:
+
+        items: list
+            A list of items in transaction 
+        utilities: list
+            A list of utilities of items in transaction
+        transactionUtility: int
+            represent total sum of all utilities in the database
+        prefixUtility:
+            prefix Utility values of item
+        offset:
+            an offset pointer, used by projected transaction
+
+    :Methods:
+
+        projectTransaction(offsetE):
+            A method to create a new Transaction from the existing one, starting from offsetE until the end
+        getItems():
+            return items in transaction
+        getUtilities():
+            return utilities in transaction
+        getLastPosition():
+            return last position in a transaction
+        removeUnpromisingItems():
+            A method to remove items that have low utility values when compared with minUtil
+        insertionSort():
+            A method to sort all items in the transaction
+    """
+    offset = 0
+    prefixUtility = 0
+
+    def __init__(self, items: list, utilities: list, transactionUtility: int) -> None:
+        self.items = items
+        self.utilities = utilities
+        self.transactionUtility = transactionUtility
+
+    def projectTransaction(self, offsetE: int) -> '_Transaction':
+        """
+        A method to create a new Transaction from the existing transaction, starting from offsetE until the end
+
+        :param offsetE: an offset over the original transaction for projecting the transaction
+        :type offsetE: int
+        :return: the projected transaction
+        :rtype: _Transaction
+        """
+        new_transaction = _Transaction(self.items, self.utilities, self.transactionUtility)
+        utilityE = self.utilities[offsetE]
+        new_transaction.prefixUtility = self.prefixUtility + utilityE
+        new_transaction.transactionUtility = self.transactionUtility - utilityE
+        for i in range(self.offset, offsetE):
+            new_transaction.transactionUtility -= self.utilities[i]
+        new_transaction.offset = offsetE + 1
+        return new_transaction
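+    # Illustrative note (added for exposition): projecting on offsetE adds the
+    # utility of item e to prefixUtility and removes from transactionUtility both
+    # e's utility and the utilities of the items skipped between the current offset
+    # and offsetE; the projected view then starts at offsetE + 1 and shares the
+    # underlying item/utility lists instead of copying them.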
+
+    def getItems(self) -> list:
+        """
+        A method to return items in transaction
+        :return: list
+        """
+        return self.items
+
+    def getUtilities(self) -> list:
+        """
+        A method to return utilities in transaction
+        :return: list
+        """
+        return self.utilities
+
+    def getLastPosition(self) -> int:
+        """
+        A method to return last position in a transaction
+        :return: int
+        """
+
+        return len(self.items) - 1
+
+    def removeUnpromisingItems(self, oldNamesToNewNames: dict) -> None:
+        """
+        A method to remove items which are not present in the map passed to the function
+
+        :param oldNamesToNewNames: A map represent old names to new names
+        :type oldNamesToNewNames: map
+        :return: None
+        """
+        tempItems = []
+        tempUtilities = []
+        for idx, item in enumerate(self.items):
+            if item in oldNamesToNewNames:
+                tempItems.append(oldNamesToNewNames[item])
+                tempUtilities.append(self.utilities[idx])
+            else:
+                self.transactionUtility -= self.utilities[idx]
+        self.items = tempItems
+        self.utilities = tempUtilities
+        self.insertionSort()
+
+    def insertionSort(self) -> None:
+        """
+        A method to sort the items of the transaction in ascending order, keeping the utilities aligned with their items
+        :return: None
+        """
+        for i in range(1, len(self.items)):
+            key = self.items[i]
+            utilityJ = self.utilities[i]
+            j = i - 1
+            while j >= 0 and key < self.items[j]:
+                self.items[j + 1] = self.items[j]
+                self.utilities[j + 1] = self.utilities[j]
+                j -= 1
+            self.items[j + 1] = key
+            self.utilities[j + 1] = utilityJ
+        
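+    # Illustrative sketch (added for exposition): the sort keeps every utility
+    # paired with its item. For items [3, 1, 2] with utilities [30, 10, 20],
+    # insertionSort() leaves items == [1, 2, 3] and utilities == [10, 20, 30].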
+
+class _Dataset:
+    """
+    A class representing the list of transactions in this dataset
+
+   :Attributes:
+
+        transactions:
+            the list of transactions in this dataset
+        maxItem:
+            the largest item name
+        
+   :methods:
+
+        createTransaction(line):
+            Create a transaction object from a line from the input file
+        getMaxItem():
+            return Maximum Item
+        getTransactions():
+            return transactions in database
+
+    """
+    transactions = []
+    maxItem = 0
+    def __init__(self, datasetPath: str, sep: str) -> None:
+        self.strToInt = {}
+        self.intToStr = {}
+        self.cnt = 1
+        self.sep = sep
+        self.createItemSets(datasetPath)
+
+    def createItemSets(self, datasetPath: str) -> None:
+        """
+        Storing the complete transactions of the database/input file in a database variable
+        :return: None
+        """
+        self.transactions = []
+        itemsets, utilities, utilityValues = [], [], []
+        if isinstance(datasetPath, _ab._pd.DataFrame):
+            utilities, data, utilityValues = [], [], []
+            if datasetPath.empty:
+                print("its empty..")
+            i = datasetPath.columns.values.tolist()
+            if 'Transactions' in i:
+                itemsets = datasetPath['Transactions'].tolist()
+            if 'Utilities' in i:
+                utilities = datasetPath['Utilities'].tolist()
+            if 'UtilitySum' in i:
+                utilityValues = datasetPath['UtilitySum'].tolist()
+            for k in range(len(itemsets)):
+                self.transactions.append(self.createTransaction(itemsets[k], utilities[k], utilityValues[k]))
+        if isinstance(datasetPath, str):
+            if _ab._validators.url(datasetPath):
+                data = _ab._urlopen(datasetPath)
+                for line in data:
+                    line = line.decode("utf-8")
+                    trans_list = line.strip().split(':')
+                    transactionUtility = int(trans_list[1])
+                    itemsString = trans_list[0].strip().split(self.sep)
+                    itemsString = [x for x in itemsString if x]
+                    utilityString = trans_list[2].strip().split(self.sep)
+                    utilityString = [x for x in utilityString if x]
+                    self.transactions.append(self.createTransaction(itemsString, utilityString, transactionUtility))
+            else:
+                try:
+                    with open(datasetPath, 'r', encoding='utf-8') as f:
+                        for line in f:
+                            trans_list = line.strip().split(':')
+                            transactionUtility = int(trans_list[1])
+                            itemsString = trans_list[0].strip().split(self.sep)
+                            itemsString = [x for x in itemsString if x]
+                            utilityString = trans_list[2].strip().split(self.sep)
+                            utilityString = [x for x in utilityString if x]
+                            self.transactions.append(self.createTransaction(itemsString, utilityString, transactionUtility))
+                except IOError:
+                    print("File Not Found")
+                    quit()
+
+    def createTransaction(self, itemSet: list, utilities: list, utilitySum: int) -> _Transaction:
+        """
+        A method to create a Transaction from the given dataset
+
+        :param itemSet: represents a transaction's itemset in the database
+        :type itemSet: list
+        :param utilities: utility values of the respective transaction itemSets
+        :type utilities: list
+        :param utilitySum: represents the total utility of the transaction (sum of the item utilities)
+        :type utilitySum: int
+        :return: Transaction.
+        :rtype: Transaction
+        """
+        transactionUtility = utilitySum
+        itemsString = itemSet
+        utilityString = utilities
+        items = []
+        utilities = []
+        for idx, item in enumerate(itemsString):
+            if self.strToInt.get(item) is None:
+                self.strToInt[item] = self.cnt
+                self.intToStr[self.cnt] = item
+                self.cnt += 1
+            item_int = self.strToInt.get(item)
+            if item_int > self.maxItem:
+                self.maxItem = item_int
+            items.append(item_int)
+            utilities.append(int(utilityString[idx]))
+        return _Transaction(items, utilities, transactionUtility)
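+    # Note (added for exposition): createTransaction renames item strings to dense
+    # integer ids via strToInt/intToStr, so the later utility-bin arrays can be
+    # indexed by item id; maxItem tracks the largest id assigned so far.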
+
+    def getMaxItem(self) -> int:
+        """
+        A method to return the name of the largest item
+        :return: int
+        """
+        return self.maxItem
+
+    def getTransactions(self) -> list:
+        """
+        A method to return transactions from database
+        :return: list
+        """
+        return self.transactions
+
+
+
+[docs] +class RHUIM(_ab._utilityPatterns): + """ + :Description: RHUIM algorithm helps us to mine Relative High Utility itemSets from transactional databases. + + :Reference: R. U. Kiran, P. Pallikila, J. M. Luna, P. Fournier-Viger, M. Toyoda and P. K. Reddy, + "Discovering Relative High Utility Itemsets in Very Large Transactional Databases Using Null-Invariant Measure," + 2021 IEEE International Conference on Big Data (Big Data), Orlando, FL, USA, 2021, pp. 252-262, + doi: 10.1109/BigData52589.2021.9672064. + + :param iFile: str : + Name of the Input file to mine complete set of Relative High Utility patterns + :param oFile: str : + Name of the output file to store complete set of Relative High Utility patterns + :param minUtil: int : + The minimum utility threshold. + :param minUR: float : + The minimum relative utility (utility ratio) threshold a pattern must satisfy. + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override their default separator. + + :Attributes: + + iFile : file + Name of the input file to mine complete set of patterns + oFile : file + Name of the output file to store complete set of patterns + memoryUSS : float + To store the total amount of USS memory consumed by the program + memoryRSS : float + To store the total amount of RSS memory consumed by the program + startTime : float + To record the start time of the mining process + endTime : float + To record the completion time of the mining process + minUtil : int + The user given minUtil value + minUR : float + The user given minUR value + relativeHighUtilityItemSets : map + set of relative high utility itemSets + candidateCount : int + Number of candidates + utilityBinArrayLU : map + A map to hold the local utility values of the items in database + utilityBinArraySU : map + A map to hold the subtree utility values of the items in database + oldNamesToNewNames : map + A map which contains old names, new names of items as key value pairs + newNamesToOldNames : map + A map which contains new names, old names of items as key value pairs + maxMemory : float + Maximum memory used by this program for running + patternCount : int + Number of RHUI's + itemsToKeep : list + keep only the promising items i.e items that can extend other items to form RHUIs + itemsToExplore : list + list of items that need to be explored + + :Methods: + + startMine() + Mining process will start from here + getPatterns() + Complete set of patterns will be retrieved with this function + save(oFile) + Complete set of patterns will be loaded in to an output file + getPatternsAsDataFrame() + Complete set of patterns will be loaded in to a dataframe + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function + backTrackingRHUIM(transactionsOfP, itemsToKeep, itemsToExplore, prefixLength) + A method to mine the RHUIs recursively + useUtilityBinArraysToCalculateUpperBounds(transactionsPe, j, itemsToKeep) + A method to calculate the sub-tree utility and local utility of all items that can extend itemSet P and e + output(tempPosition, utility) + A method to output a relative-high-utility itemSet to file or memory depending on what the user chose + is_equal(transaction1, transaction2) + A method to check if two transactions are identical + useUtilityBinArrayToCalculateSubtreeUtilityFirstTime(dataset) + A method to calculate the sub tree utility values for single items + sortDatabase(self, transactions) + A method to sort the transactions of the database + sort_transaction(self, trans1, trans2) + A comparator method used while sorting the transactions + useUtilityBinArrayToCalculateLocalUtilityFirstTime(self, dataset) + A method to calculate local utility values for single itemSets + + **Methods to execute code on terminal** + ------------------------------------------- + .. code-block:: console + + Format: + + (.venv) $ python3 RHUIM.py <inputFile> <outputFile> <minUtil> <minUR> + + Example usage: + + (.venv) $ python3 RHUIM.py sampleTDB.txt output.txt 35 20 + + + .. note:: minUtil will be considered as an absolute utility value, while minUR is a utility ratio + + + + **Importing this algorithm into a python program** + ----------------------------------------------------- + .. code-block:: python + + from PAMI.relativeHighUtilityPattern.basic import RHUIM as alg + + obj = alg.RHUIM("input.txt", 35, 20) + + obj.startMine() + + frequentPatterns = obj.getPatterns() + + print("Total number of Frequent Patterns:", len(frequentPatterns)) + + obj.save(oFile) + + Df = obj.getPatternsAsDataFrame() + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + **Credits:** + ----------------- + The complete program was written by Pradeep Pallikila under the supervision of Professor Rage Uday Kiran. + + """ + + _relativeHighUtilityItemSets = [] + _candidateCount = 0 + _utilityBinArrayLU = {} + _utilityBinArraySU = {} + _oldNamesToNewNames = {} + _newNamesToOldNames = {} + _singleItemSetsUtilities = {} + _strToInt = {} + _intToStr = {} + _temp = [0]*5000 + _patternCount = int() + _maxMemory = 0 + _startTime = float() + _endTime = float() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _nFile = " " + _lno = 0 + _sep = "\t" + _minUtil = 0 + _minUR = 0 + _memoryUSS = float() + _memoryRSS = float() + + def __init__(self, iFile: str, minUtil: int, minUR: float, sep: str="\t") -> None: + super().__init__(iFile, minUtil, minUR, sep) + +
+[docs] + def startMine(self) -> None: + """ + Mining process will start from this function + :return: None + """ + self._startTime = _ab._time.time() + self._dataset = _Dataset(self._iFile, self._sep) + self._finalPatterns = {} + self._useUtilityBinArrayToCalculateLocalUtilityFirstTime(self._dataset) + _minUtil = int(self._minUtil) + _minUR = float(self._minUR) + # print(minUR) + self._singleItemSetsUtilities = _ab._defaultdict(int) + itemsToKeep = [] + for key in self._utilityBinArrayLU.keys(): + if self._utilityBinArrayLU[key] >= _minUtil: + itemsToKeep.append(key) + itemsToKeep = sorted(itemsToKeep, key=lambda x: self._utilityBinArrayLU[x]) + currentName = 1 + for idx, item in enumerate(itemsToKeep): + self._oldNamesToNewNames[item] = currentName + self._newNamesToOldNames[currentName] = item + itemsToKeep[idx] = currentName + currentName += 1 + for transaction in self._dataset.getTransactions(): + transaction.removeUnpromisingItems(self._oldNamesToNewNames) + self.sortDatabase(self._dataset.getTransactions()) + emptyTransactionCount = 0 + for transaction in self._dataset.getTransactions(): + if len(transaction.getItems()) == 0: + emptyTransactionCount += 1 + self._dataset.transactions = self._dataset.transactions[emptyTransactionCount:] + self._useUtilityBinArrayToCalculateSubtreeUtilityFirstTime(self._dataset) + itemsToExplore = [] + for item in itemsToKeep: + if self._utilityBinArraySU[item] >= _minUtil: + itemsToExplore.append(item) + utilitySum = 0 + self._backTrackingRHUIM(self._dataset.getTransactions(), itemsToKeep, itemsToExplore, 0, utilitySum) + self._endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = float() + self._memoryRSS = float() + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss + print("Relative High Utility patterns were generated successfully using RHUIM algorithm")
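+    # Illustrative sketch (not part of the PAMI source) of the local-utility filter applied
+    # in startMine() above: an item's local utility is the sum of the utilities of the
+    # transactions that contain it, so items whose local utility is below minUtil can never
+    # take part in an RHUI. The toy database and values below are assumptions.
+    #
+    #     transactions = [({'a': 5, 'b': 2}, 7), ({'b': 1, 'c': 4}, 5), ({'a': 3, 'c': 2}, 5)]
+    #     localUtility = {}
+    #     for items, transactionUtility in transactions:
+    #         for item in items:
+    #             localUtility[item] = localUtility.get(item, 0) + transactionUtility
+    #     # localUtility == {'a': 12, 'b': 12, 'c': 10}; with minUtil = 11 the item 'c'
+    #     # is pruned, since every itemset containing 'c' has utility at most 10.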
+
+
+    def _backTrackingRHUIM(self, transactionsOfP: list, itemsToKeep: list, itemsToExplore: list, prefixLength: int, utilitySumP: int) -> None:
+        """
+        A method to mine the RHUIs recursively
+
+        :Attributes:
+
+        :param transactionsOfP: the list of transactions containing the current prefix P
+        :type transactionsOfP: list
+        :param itemsToKeep: the list of secondary items in the p-projected database
+        :type itemsToKeep: list
+        :param itemsToExplore: the list of primary items in the p-projected database
+        :type itemsToExplore: list
+        :param prefixLength: current prefixLength
+        :type prefixLength: int
+        :param utilitySumP: a variable to hold the sum of the utilities of all items in P
+        :type utilitySumP: int
+        :return: None
+        """
+        self._candidateCount += len(itemsToExplore)
+        for idx, e in enumerate(itemsToExplore):
+            transactionsPe = []
+            utilityPe = 0
+            utilitySumPe = utilitySumP + self._singleItemSetsUtilities[e]
+            previousTransaction = transactionsOfP[0]
+            consecutiveMergeCount = 0
+            for transaction in transactionsOfP:
+                items = transaction.getItems()
+                if e in items:
+                    positionE = items.index(e)
+                    if transaction.getLastPosition() == positionE:
+                        utilityPe += transaction.getUtilities()[positionE] + transaction.prefixUtility
+                    else:
+                        projectedTransaction = transaction.projectTransaction(positionE)
+                        utilityPe += projectedTransaction.prefixUtility
+                        if previousTransaction == transactionsOfP[0]:
+                            previousTransaction = projectedTransaction
+                        elif self._isEqual(projectedTransaction, previousTransaction):
+                            if consecutiveMergeCount == 0:
+                                items = previousTransaction.items[previousTransaction.offset:]
+                                utilities = previousTransaction.utilities[previousTransaction.offset:]
+                                itemsCount = len(items)
+                                positionPrevious = 0
+                                positionProjection = projectedTransaction.offset
+                                while positionPrevious < itemsCount:
+                                    utilities[positionPrevious] += projectedTransaction.utilities[positionProjection]
+                                    positionPrevious += 1
+                                    positionProjection += 1
+                                previousTransaction.prefixUtility += projectedTransaction.prefixUtility
+                                sumUtilities = previousTransaction.prefixUtility
+                                previousTransaction = _Transaction(items, utilities, previousTransaction.transactionUtility + projectedTransaction.transactionUtility)
+                                previousTransaction.prefixUtility = sumUtilities
+                            else:
+                                positionPrevious = 0
+                                positionProjected = projectedTransaction.offset
+                                itemsCount = len(previousTransaction.items)
+                                while positionPrevious < itemsCount:
+                                    previousTransaction.utilities[positionPrevious] += projectedTransaction.utilities[positionProjected]
+                                    positionPrevious += 1
+                                    positionProjected += 1
+                                previousTransaction.transactionUtility += projectedTransaction.transactionUtility
+                                previousTransaction.prefixUtility += projectedTransaction.prefixUtility
+                            consecutiveMergeCount += 1
+                        else:
+                            transactionsPe.append(previousTransaction)
+                            previousTransaction = projectedTransaction
+                            consecutiveMergeCount = 0
+                    transaction.offset = positionE
+            if previousTransaction != transactionsOfP[0]:
+                transactionsPe.append(previousTransaction)
+            self._temp[prefixLength] = self._newNamesToOldNames[e]
+            utility_ratio_pe = float(utilityPe / utilitySumPe)
+            if (utilityPe >= self._minUtil) and (utility_ratio_pe * 100 >= self._minUR):
+                self._output(prefixLength, utilityPe, utility_ratio_pe)
+            self._useUtilityBinArraysToCalculateUpperBounds(transactionsPe, idx, itemsToKeep)
+            newItemsToKeep = []
+            newItemsToExplore = []
+            for l in range(idx + 1, len(itemsToKeep)):
+                itemK = itemsToKeep[l]
+                utility_sum_pek = utilitySumPe + self._singleItemSetsUtilities[itemK]
+                subtree_utility_ratio = float(self._utilityBinArraySU[itemK] / utility_sum_pek)
+                local_utility_ratio = float(self._utilityBinArrayLU[itemK] / utility_sum_pek)
+                if self._utilityBinArraySU[itemK] >= self._minUtil and subtree_utility_ratio * 100 >= self._minUR:
+                    newItemsToExplore.append(itemK)
+                    newItemsToKeep.append(itemK)
+                elif self._utilityBinArrayLU[itemK] >= self._minUtil and local_utility_ratio * 100 >= self._minUR:
+                    newItemsToKeep.append(itemK)
+            self._backTrackingRHUIM(transactionsPe, newItemsToKeep, newItemsToExplore, prefixLength + 1, utilitySumPe)
+
+    def _useUtilityBinArraysToCalculateUpperBounds(self, transactionsPe: list, j: int, itemsToKeep: list) -> None:
+        """
+        A method to calculate the subtree utility and local utility of all items that can extend itemSet P U {e}
+
+        :Attributes:
+
+        :param transactionsPe: the transactions of the projected database for P U {e}
+        :type transactionsPe: list or Dataset
+        :param j: the position of e in the list of promising items
+        :type j: int
+        :param itemsToKeep: the list of promising items
+        :type itemsToKeep: list or Dataset
+        :return: None
+        """
+        for i in range(j + 1, len(itemsToKeep)):
+            item = itemsToKeep[i]
+            self._utilityBinArrayLU[item] = 0
+            self._utilityBinArraySU[item] = 0
+        for transaction in transactionsPe:
+            sumRemainingUtility = 0
+            i = len(transaction.getItems()) - 1
+            while i >= transaction.offset:
+                item = transaction.getItems()[i]
+                if item in itemsToKeep:
+                    sumRemainingUtility += transaction.getUtilities()[i]
+                    self._utilityBinArraySU[item] += sumRemainingUtility + transaction.prefixUtility
+                    self._utilityBinArrayLU[item] += transaction.transactionUtility + transaction.prefixUtility
+                i -= 1
+
+    def _output(self, tempPosition: int, utility: int, utilityRatio: float) -> None:
+        """
+        A method to record a relative high utility itemSet in memory
+
+        :Attributes:
+
+        :param tempPosition: position of the last item
+        :type tempPosition: int
+        :param utility: total utility of the itemSet
+        :type utility: int
+        :param utilityRatio: utility ratio of the itemSet
+        :type utilityRatio: float
+        :return: None
+        """
+        self._patternCount += 1
+        s1 = str()
+        for i in range(0, tempPosition + 1):
+            s1 += self._dataset.intToStr.get((self._temp[i]))
+            if i != tempPosition:
+                s1 += "\t"
+        self._finalPatterns[s1] = [utility, utilityRatio]
+
+    def _isEqual(self, transaction1: _Transaction, transaction2: _Transaction) -> bool:
+        """
+        A method to check whether two transactions are identical
+
+        :Attributes:
+
+        :param transaction1: the first transaction.
+        :type transaction1: Transaction
+        :param transaction2: the second transaction.
+ :type transaction2: Transaction + :return : whether both are identical or not + :rtype: bool + """ + length1 = len(transaction1.items) - transaction1.offset + length2 = len(transaction2.items) - transaction2.offset + if length1 != length2: + return False + position1 = transaction1.offset + position2 = transaction2.offset + while position1 < len(transaction1.items): + if transaction1.items[position1] != transaction2.items[position2]: + return False + position1 += 1 + position2 += 1 + return True + + def _useUtilityBinArrayToCalculateSubtreeUtilityFirstTime(self, dataset: _Dataset) -> None: + """ + Scan the initial database to calculate the subtree utility of each item using a utility-bin array + + :Attributes: + + :param dataset: the transaction database + :type dataset: Dataset + :return: None + """ + for transaction in dataset.getTransactions(): + sumSU = 0 + i = len(transaction.getItems()) - 1 + while i >= 0: + item = transaction.getItems()[i] + currentUtility = transaction.getUtilities()[i] + sumSU += currentUtility + self._singleItemSetsUtilities[item] += currentUtility + if item in self._utilityBinArraySU.keys(): + self._utilityBinArraySU[item] += sumSU + else: + self._utilityBinArraySU[item] = sumSU + i -= 1 + +
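+    # Illustrative sketch (not part of the PAMI source) of the first subtree-utility scan
+    # above: scanning one transaction from its last item backwards, each item's subtree
+    # utility accumulates the suffix sum of utilities. Toy values are assumptions.
+    #
+    #     items, utilities = ['a', 'b', 'c'], [5, 2, 4]
+    #     subtreeUtility, suffixSum = {}, 0
+    #     for item, u in zip(reversed(items), reversed(utilities)):
+    #         suffixSum += u
+    #         subtreeUtility[item] = subtreeUtility.get(item, 0) + suffixSum
+    #     # subtreeUtility == {'c': 4, 'b': 6, 'a': 11}: each item's own utility plus
+    #     # everything that can still follow it in the processing order.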
+[docs]
+    def sortDatabase(self, transactions: list) -> None:
+        """
+        A method to sort the transaction database in place
+
+        :Attributes:
+
+        :param transactions: the list of transactions to sort
+        :type transactions: list
+        :return: None
+        """
+        cmp_items = _ab._functools.cmp_to_key(self.sort_transaction)
+        transactions.sort(key=cmp_items)
+ + +
+[docs]
+    def sort_transaction(self, trans1: _Transaction, trans2: _Transaction) -> int:
+        """
+        A comparator that orders two transactions by their items, read from the last position backwards
+
+        :Attributes:
+
+        :param trans1: the first transaction.
+        :type trans1: Transaction
+        :param trans2: the second transaction.
+        :type trans2: Transaction
+        :return: a negative, zero, or positive number depending on the ordering of the two transactions
+        :rtype: int
+        """
+        trans1_items = trans1.getItems()
+        trans2_items = trans2.getItems()
+        pos1 = len(trans1_items) - 1
+        pos2 = len(trans2_items) - 1
+        if len(trans1_items) < len(trans2_items):
+            while pos1 >= 0:
+                sub = trans2_items[pos2] - trans1_items[pos1]
+                if sub != 0:
+                    return sub
+                pos1 -= 1
+                pos2 -= 1
+            return -1
+        elif len(trans1_items) > len(trans2_items):
+            while pos2 >= 0:
+                sub = trans2_items[pos2] - trans1_items[pos1]
+                if sub != 0:
+                    return sub
+                pos1 -= 1
+                pos2 -= 1
+            return 1
+        else:
+            while pos2 >= 0:
+                sub = trans2_items[pos2] - trans1_items[pos1]
+                if sub != 0:
+                    return sub
+                pos1 -= 1
+                pos2 -= 1
+            return 0
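+    # Usage note (illustrative, not part of the PAMI source): because the comparator reads
+    # transactions from their last item backwards, transactions sharing a suffix sort next
+    # to each other, which is what makes the transaction merging in _backTrackingRHUIM
+    # effective. A minimal stand-alone equivalent over plain lists of renamed items:
+    #
+    #     import functools
+    #     def backward_cmp(t1, t2):
+    #         for a, b in zip(reversed(t1), reversed(t2)):
+    #             if b != a:
+    #                 return b - a
+    #         return len(t1) - len(t2)
+    #     database = [[1, 3], [2, 4], [2, 3]]
+    #     database.sort(key=functools.cmp_to_key(backward_cmp))
+    #     # -> [[2, 4], [2, 3], [1, 3]]: the two transactions ending in 3 are adjacent.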
+
+
+    def _useUtilityBinArrayToCalculateLocalUtilityFirstTime(self, dataset: _Dataset) -> None:
+        """
+        A method to calculate the local utility of single itemSets
+
+        :Attributes:
+
+        :param dataset: the transaction database.
+        :type dataset: Dataset
+        :return: None
+        """
+        for transaction in dataset.getTransactions():
+            for item in transaction.getItems():
+                if item in self._utilityBinArrayLU:
+                    self._utilityBinArrayLU[item] += transaction.transactionUtility
+                else:
+                    self._utilityBinArrayLU[item] = transaction.transactionUtility
+
+
+[docs] + def getPatternsAsDataFrame(self) -> _ab._pd.DataFrame: + """Storing final patterns in a dataframe + + :return: returning patterns in a dataframe + :rtype: pd.DataFrame + """ + dataFrame = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a.replace('\t', ' '), b[0], b[1]]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Utility', 'UtilityRatio']) + + return dataFrame
+ + +
+[docs] + def getPatterns(self) -> dict: + """ Function to send the set of patterns after completion of the mining process + + :return: returning patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs]
+    def save(self, outFile: str) -> None:
+        """
+        Complete set of relative high utility patterns will be written to an output file
+
+        :param outFile: name of the output file
+        :type outFile: str
+        :return: None
+        """
+        self._oFile = outFile
+        with open(self._oFile, 'w+') as writer:
+            for x, y in self._finalPatterns.items():
+                patternsAndSupport = x.strip() + ":" + str(y[0]) + ":" + str(y[1])
+                writer.write("%s \n" % patternsAndSupport)
+ + +
+[docs] + def getMemoryUSS(self) -> float: + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self) -> float: + """Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self) -> float: + """Calculating the total amount of runtime taken by the mining process + + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + return self._endTime-self._startTime
+ + +
+[docs]
+    def printResults(self) -> None:
+        """
+        This function is used to print the results
+
+        :return: None
+        """
+        print("Total number of Relative High Utility Patterns:", len(self.getPatterns()))
+        print("Total Memory in USS:", self.getMemoryUSS())
+        print("Total Memory in RSS", self.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+
+
+if __name__ == '__main__':
+    _ap = str()
+    if len(_ab._sys.argv) == 5 or len(_ab._sys.argv) == 6:
+        if len(_ab._sys.argv) == 6:    # includes separator
+            _ap = RHUIM(_ab._sys.argv[1], int(_ab._sys.argv[3]), float(_ab._sys.argv[4]), _ab._sys.argv[5])
+        if len(_ab._sys.argv) == 5:    # takes "\t" as the separator
+            _ap = RHUIM(_ab._sys.argv[1], int(_ab._sys.argv[3]), float(_ab._sys.argv[4]))
+        _ap.startMine()
+        print("Total number of Relative High Utility Patterns:", len(_ap.getPatterns()))
+        _ap.save(_ab._sys.argv[2])
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        # _ap = RHUIM('/Users/likhitha/Downloads/utility_datasets/Utility_T10I4D100K.csv', 150000, 0.6, '\t')
+        # _ap.startMine()
+        # print("Total number of Relative High Utility Patterns:", len(_ap.getPatterns()))
+        # print("Total Memory in USS:", _ap.getMemoryUSS())
+        # print("Total Memory in RSS", _ap.getMemoryRSS())
+        # print("Total ExecutionTime in seconds:", _ap.getRuntime())
+        print("Error! The number of input parameters do not match the total number of parameters provided")
+
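+# Illustrative note (not part of the PAMI source): each line written by save() above has
+# the form
+#
+#     item1<TAB>item2:utility:utilityRatio
+#
+# e.g. "a<TAB>b:120:0.45" for an assumed toy pattern {a, b} with utility 120 and a utility
+# ratio of 0.45, matching the [utility, utilityRatio] values stored in _finalPatterns.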
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/sequentialPatternMining/basic/SPADE.html b/sphinx/_build/html/_modules/PAMI/sequentialPatternMining/basic/SPADE.html new file mode 100644 index 000000000..ca79d613c --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/sequentialPatternMining/basic/SPADE.html @@ -0,0 +1,1040 @@ + + + + + + PAMI.sequentialPatternMining.basic.SPADE — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
+
+
+
+
+ +

Source code for PAMI.sequentialPatternMining.basic.SPADE

+# SPADE is one of the fundamental algorithms to discover sequential frequent patterns in a transactional database.
+# This program employs the SPADE property (or downward closure property) to reduce the search space effectively.
+# This algorithm employs a breadth-first search for patterns of length 1 and 2, and a depth-first search for patterns of length 3 or more, to find the complete set of frequent patterns in a transactional database.
+#
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#
+#             import PAMI.sequentialPatternMining.basic.SPADE as alg
+#
+#             obj = alg.SPADE(iFile, minSup)
+#
+#             obj.startMine()
+#
+#             sequentialPatternMining = obj.getPatterns()
+#
+#             print("Total number of Frequent Patterns:", len(frequentPatterns))
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+
+__copyright__ = """
+ Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+
+import pandas as pd
+from deprecated import deprecated
+
+from PAMI.sequentialPatternMining.basic import abstract as _ab
+
+_ab._sys.setrecursionlimit(10000)
+
+
+[docs]
+class SPADE(_ab._sequentialPatterns):
+    """
+    :Description:
+
+        * SPADE is one of the fundamental algorithms to discover sequential frequent patterns in a transactional database.
+        * This program employs the SPADE property (or downward closure property) to reduce the search space effectively.
+        * This algorithm employs a breadth-first search for patterns of length 1 and 2, and a depth-first search for patterns of length 3 or more, to find the complete set of frequent patterns in a transactional database.
+
+    :Reference: Mohammed J. Zaki. 2001. SPADE: An Efficient Algorithm for Mining Frequent Sequences. Mach. Learn. 42, 1-2 (January 2001), 31-60. DOI=10.1023/A:1007652502315 http://dx.doi.org/10.1023/A:1007652502315
+
+    :param iFile: str :
+                   Name of the Input file to mine the complete set of sequential frequent patterns
+    :param oFile: str :
+                   Name of the output file to store the complete set of sequential frequent patterns
+    :param minSup: float or int or str :
+                   minSup measure constrains the minimum number of transactions in a database where a pattern must appear
+                   Example: minSup=10 will be treated as integer, while minSup=10.0 will be treated as float
+    :param sep: str :
+                   This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator.
+
+    :Attributes:
+
+        iFile : str
+            Input file name or path of the input file
+        oFile : str
+            Name of the output file or the path of the output file
+        minSup : float or int or str
+            The user can specify minSup either in count or proportion of database size.
+            If the program detects the data type of minSup is integer, then it treats minSup as expressed in count.
+            Otherwise, it will be treated as float.
+            Example: minSup=10 will be treated as integer, while minSup=10.0 will be treated as float
+        sep : str
+            This variable is used to distinguish items from one another in a transaction. The default separator is tab space or \t.
+            However, the users can override the default separator.
+        startTime : float
+            To record the start time of the mining process
+        endTime : float
+            To record the completion time of the mining process
+        finalPatterns : dict
+            Storing the complete set of patterns in a dictionary variable
+        memoryUSS : float
+            To store the total amount of USS memory consumed by the program
+        memoryRSS : float
+            To store the total amount of RSS memory consumed by the program
+        Database : list
+            To store the transactions of a database in a list
+        _xLenDatabase : dict
+            To store the data of patterns whose latest items are in different itemsets, keyed by pattern length, prefix, last item, and sequence number.
+        _xLenDatabaseSame : dict
+            To store the data of patterns whose latest items are in the same itemset, keyed by pattern length, prefix, last item, and sequence number.
+
+    :Methods:
+
+        startMine()
+            Mining process will start from here
+        getPatterns()
+            Complete set of patterns will be retrieved with this function
+        save(oFile)
+            Complete set of frequent patterns will be loaded in to an output file
+        getPatternsAsDataFrame()
+            Complete set of frequent patterns will be loaded in to a dataframe
+        getMemoryUSS()
+            Total amount of USS memory consumed by the mining process will be retrieved from this function
+        getMemoryRSS()
+            Total amount of RSS memory consumed by the mining process will be retrieved from this function
+        getRuntime()
+            Total amount of runtime taken by the mining process will be retrieved from this function
+        make1LenDatabase()
+            Generates 1-length frequent patterns and converts the database into a vertical id-list database
+        make2LenDatabase()
+            Generates 2-length frequent patterns by joining pairs of 1-length patterns
+        make3LenDatabase()
+            Generates frequent patterns of length 3 or more by depth-first search
+
+    **Methods to execute code on terminal**
+    -------------------------------------------
+    .. code-block:: console
+
+
+      Format:
+
+      (.venv) $ python3 SPADE.py <inputFile> <outputFile> <minSup>
+
+      Example usage:
+
+      (.venv) $ python3 SPADE.py sampleDB.txt patterns.txt 10.0
+
+
+    .. note:: minSup can be specified either as an absolute count (integer) or as a proportion of the database size (float)
+
+    **Importing this algorithm into a python program**
+    ----------------------------------------------------
+    .. code-block:: python
+
+        import PAMI.sequentialPatternMining.basic.SPADE as alg
+
+        obj = alg.SPADE(iFile, minSup)
+
+        obj.startMine()
+
+        sequentialPatternMining = obj.getPatterns()
+
+        print("Total number of Frequent Patterns:", len(sequentialPatternMining))
+
+        obj.save(oFile)
+
+        Df = obj.getPatternsAsDataFrame()
+
+        memUSS = obj.getMemoryUSS()
+
+        print("Total Memory in USS:", memUSS)
+
+        memRSS = obj.getMemoryRSS()
+
+        print("Total Memory in RSS", memRSS)
+
+        run = obj.getRuntime()
+
+        print("Total ExecutionTime in seconds:", run)
+
+    **Credits:**
+    ---------------
+
+        The complete program was written by Suzuki Shota under the supervision of Professor Rage Uday Kiran.
+ + """ + + _minSup = float() + _startTime = float() + _endTime = float() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _sep = " " + _memoryUSS = float() + _memoryRSS = float() + _Database = [] + _xLenDatabase={} + _xLenDatabaseSame = {} + def _creatingItemSets(self): + """ + Storing the complete transactions of the database/input file in a database variable + """ + self._Database = [] + + if isinstance(self._iFile, _ab._pd.DataFrame): + temp = [] + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'Transactions' in i: + temp = self._iFile['Transactions'].tolist() + if "tid" in i: + temp2=self._iFile[''].tolist() + addList=[] + addList.append(temp[0]) + for k in range(len(temp)-1): + if temp2[k]==temp[k+1]: + addList.append(temp[k+1]) + else: + self._Database.append(addList) + addList=[] + addList.append(temp[k+1]) + self._Database.append(addList) + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + temp.pop() + self._Database.append(temp) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line.strip() + temp = [i.rstrip() for i in line.split('-1')] + temp = [x for x in temp if x ] + temp.pop() + + seq = [] + for i in temp: + k = -2 + if len(i)>1: + seq.append(list(sorted(set(i.split())))) + + else: + seq.append(i) + + self._Database.append(seq) + + except IOError: + print("File Not Found") + quit() + + def _convert(self, value): + """ + To convert the user specified minSup value + + :param value: user specified minSup value + :return: converted type + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self._Database) * value) + if type(value) is str: + if '.' in value: + value = float(value) + value = (len(self._Database) * value) + else: + value = int(value) + return value + + +
+[docs] + def make1LenDatabase(self): + """ + To make 1 length frequent patterns by breadth-first search technique and update Database to sequential database + """ + + idDatabase={} + alreadyInData=[] + lineNumber=0 + alreadyInLine={} + for line in self._Database: + seqNumber=0 + for seq in line: + + for data in seq: + if data in alreadyInData: + if lineNumber in alreadyInLine[data]: + idDatabase[data][lineNumber].append(seqNumber) + else: + idDatabase[data][lineNumber] = [] + idDatabase[data][lineNumber].append(seqNumber) + alreadyInLine[data].append(lineNumber) + else: + idDatabase[data]={} + idDatabase[data][lineNumber]=[] + idDatabase[data][lineNumber].append(seqNumber) + alreadyInData.append(data) + alreadyInLine[data]=[] + alreadyInLine[data].append(lineNumber) + + + + seqNumber+=1 + lineNumber+=1 + + newDatabase={i :idDatabase[i] for i in idDatabase.keys()} + for key in idDatabase.keys(): + if len(idDatabase[key].keys())<self._minSup: + newDatabase.pop(key) + else: + self._finalPatterns[str(key)]=len(idDatabase[key].keys()) + self._Database=newDatabase
+ + + +
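+    # Illustrative shape of the vertical id-list database built by make1LenDatabase above
+    # (toy data, assumed, not from the library): for the sequence database
+    #
+    #     [[['a'], ['b']],
+    #      [['a', 'b']]]
+    #
+    # the structure becomes {'a': {0: [0], 1: [0]}, 'b': {0: [1], 1: [0]}}, i.e.
+    # item -> {sequence number: [itemset positions]}. The support of an item is the number
+    # of distinct sequence numbers in its map, so both 'a' and 'b' have support 2 here.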
+[docs]
+    def make2LenDatabase(self):
+        """
+        To make 2-length frequent patterns by joining pairs of 1-length patterns using a breadth-first search technique, and update the xLen databases
+        """
+        self._xLenDatabase = {}
+        keyList = [i for i in self._Database.keys()]
+        nextDatabase = {i: {} for i in self._Database.keys()}
+        nextDatabaseSame = {i: {} for i in self._Database.keys()}
+        keyNumber = -1
+        for key1 in keyList:
+            keyNumber += 1
+            for key2 in keyList[keyNumber:]:
+                if key1 != key2:
+                    if len(self._Database[key1].keys()) >= len(self._Database[key2].keys()):
+                        nextDatabase[key1][key2] = {}
+                        nextDatabase[key2][key1] = {}
+                        nextDatabaseSame[key1][key2] = {}
+                        for seq in self._Database[key2].keys():
+                            if seq in self._Database[key1].keys():
+                                x = [i for i in self._Database[key1][seq] if i > self._Database[key2][seq][0]]
+                                if len(x) != 0:
+                                    nextDatabase[key2][key1][seq] = x
+                                x = [i for i in self._Database[key2][seq] if i > self._Database[key1][seq][0]]
+                                if len(x) != 0:
+                                    nextDatabase[key1][key2][seq] = x
+                                x = list(sorted(set(self._Database[key1][seq]) & set(self._Database[key2][seq])))
+                                if len(x) != 0:
+                                    nextDatabaseSame[key1][key2][seq] = x
+                    else:
+                        nextDatabase[key1][key2] = {}
+                        nextDatabase[key2][key1] = {}
+                        nextDatabaseSame[key1][key2] = {}
+                        for seq in self._Database[key1].keys():
+                            if seq in self._Database[key2].keys():
+                                x = [i for i in self._Database[key1][seq] if i > self._Database[key2][seq][0]]
+                                if len(x) != 0:
+                                    nextDatabase[key2][key1][seq] = x
+                                x = [i for i in self._Database[key2][seq] if i > self._Database[key1][seq][0]]
+                                if len(x) != 0:
+                                    nextDatabase[key1][key2][seq] = x
+                                x = list(sorted(set(self._Database[key1][seq]) & set(self._Database[key2][seq])))
+                                if len(x) != 0:
+                                    nextDatabaseSame[key1][key2][seq] = x
+                else:
+                    nextDatabase[key1][key2] = {}
+                    for seq in self._Database[key2].keys():
+                        if len(self._Database[key1][seq]) >= 2:
+                            nextDatabase[key1][key2][seq] = self._Database[key2][seq][1:]
+        self._xLenDatabase[2] = {tuple([i]): {} for i in nextDatabase.keys()}
+        for key1 in nextDatabase.keys():
+            for key2 in nextDatabase[key1].keys():
+                if len(nextDatabase[key1][key2].keys()) >= self._minSup:
+                    self._finalPatterns[str((key1, -1, key2, -1))] = len(nextDatabase[key1][key2].keys())
+                    self._xLenDatabase[2][tuple([key1])][key2] = nextDatabase[key1][key2]
+        self._xLenDatabaseSame[2] = {tuple([i]): {} for i in nextDatabaseSame.keys()}
+        for key1 in nextDatabaseSame.keys():
+            for key2 in nextDatabaseSame[key1].keys():
+                if len(nextDatabaseSame[key1][key2].keys()) >= self._minSup:
+                    self._finalPatterns[str((key1, key2, -1))] = len(nextDatabaseSame[key1][key2].keys())
+                    self._xLenDatabaseSame[2][tuple([key1])][key2] = {i: nextDatabaseSame[key1][key2][i] for i in nextDatabaseSame[key1][key2].keys()}
+                    self._xLenDatabaseSame[2][tuple([key2])][key1] = {i: nextDatabaseSame[key1][key2][i] for i in nextDatabaseSame[key1][key2].keys()}
+ + +
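+    # Illustrative temporal join performed above (toy id-lists, assumed): suppose in
+    # sequence 0 the item 'a' occurs in itemsets [0, 2] and 'b' occurs in itemset [1].
+    #
+    #     a = {0: [0, 2]}
+    #     b = {0: [1]}
+    #     s_extension = [i for i in b[0] if i > a[0][0]]    # -> [1]: "a-b", b occurs after a
+    #     i_extension = sorted(set(a[0]) & set(b[0]))       # -> []:  "ab", never in one itemset
+    #
+    # A 2-length pattern is kept when the number of sequences with a non-empty position
+    # list reaches _minSup.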
+[docs]
+    def make3LenDatabase(self):
+        """
+        To call each 2-length pattern to make 3-length frequent patterns by a depth-first search technique
+        """
+        for i in self._xLenDatabase[2].keys():
+            for k in self._xLenDatabase[2][i].keys():
+                self.makexLenDatabase(2, i, k)
+        for i in self._xLenDatabaseSame[2].keys():
+            for k in self._xLenDatabaseSame[2][i].keys():
+                self.makexLenDatabaseSame(2, i, k)
+ + +
+[docs] + def makexLenDatabase(self, rowLen, bs, latestWord): + """ + To make "rowLen" length frequent patterns from pattern which the latest word is in same seq by joining "rowLen"-1 length patterns by depth-first search technique and update xlenDatabase to sequential database + + :param rowLen: row length of patterns. + :param bs : patterns without the latest one + :param latestWord : latest word of patterns + """ + if rowLen+1 not in self._xLenDatabase: + self._xLenDatabase[rowLen+1]={} + self._xLenDatabaseSame[rowLen+1]={} + for latestWord2 in self._xLenDatabase[rowLen][bs].keys(): + if latestWord != latestWord2: + + if len(self._xLenDatabase[rowLen][bs][latestWord].keys()) <= len(self._xLenDatabase[rowLen][bs][latestWord2].keys()): + next={} + next2={} + nextSame={} + + for seq in self._xLenDatabase[rowLen][bs][latestWord].keys(): + if seq in self._xLenDatabase[rowLen][bs][latestWord2].keys(): + if self._xLenDatabase[rowLen][bs][latestWord2][seq]!=[] and self._xLenDatabase[rowLen][bs][latestWord][seq]!=[]: + x = [i for i in self._xLenDatabase[rowLen][bs][latestWord2][seq] if + i > self._xLenDatabase[rowLen][bs][latestWord][seq][0]] + if len(x) != 0: + next[seq] = x + x = [i for i in self._xLenDatabase[rowLen][bs][latestWord][seq] if + i > self._xLenDatabase[rowLen][bs][latestWord2][seq][0]] + if len(x) != 0: + next2[seq] = x + x = list(sorted(set(self._xLenDatabase[rowLen][bs][latestWord][seq]) & set( + self._xLenDatabase[rowLen][bs][latestWord2][seq]))) + if len(x) != 0: + nextSame[seq] = x + if len(next)>=self._minSup: + nextRow,nextbs= self.makeNextRow(bs,latestWord,latestWord2) + if str(nextRow) not in self._finalPatterns.keys(): + self._finalPatterns[str(nextRow)] = len(next) + if nextbs not in self._xLenDatabase[rowLen + 1]: + self._xLenDatabase[rowLen + 1][nextbs]={} + self._xLenDatabase[rowLen+1][nextbs][latestWord2]={i:next[i] for i in next } + self.makexLenDatabase(rowLen+1,nextbs,latestWord2) + if len(next2)>=self._minSup: + nextRow,nextbs = self.makeNextRow(bs, latestWord2, latestWord) + if str(nextRow) not in self._finalPatterns.keys(): + if nextbs not in self._xLenDatabase[rowLen + 1]: + self._xLenDatabase[rowLen + 1][nextbs]={} + self._finalPatterns[str(nextRow)] = len(next2) + self._xLenDatabase[rowLen+1][nextbs][latestWord] = {i:next2[i] for i in next2 } + self.makexLenDatabase(rowLen+1, nextbs, latestWord) + if len(nextSame) >= self._minSup: + nextRow,nextbs ,nextlste= self.makeNextRowSame3(bs, latestWord, latestWord2) + if str(nextRow) not in self._finalPatterns.keys(): + if nextbs not in self._xLenDatabaseSame[rowLen + 1]: + self._xLenDatabaseSame[rowLen + 1][nextbs]={} + self._finalPatterns[str(nextRow)] = len(nextSame) + self._xLenDatabaseSame[rowLen+1][nextbs][nextlste]={i:nextSame[i] for i in nextSame } + self.makexLenDatabaseSame(rowLen+1, nextbs, nextlste) + + else: + next = {} + next2 = {} + nextSame = {} + + for seq in self._xLenDatabase[rowLen][bs][latestWord2].keys(): + if seq in self._xLenDatabase[rowLen][bs][latestWord].keys(): + if self._xLenDatabase[rowLen][bs][latestWord2][seq] != [] and self._xLenDatabase[rowLen][bs][latestWord][seq] != []: + x = [i for i in self._xLenDatabase[rowLen][bs][latestWord2][seq] if i > self._xLenDatabase[rowLen][bs][latestWord][seq][0]] + if len(x)!=0: + next[seq]=x + x= [i for i in self._xLenDatabase[rowLen][bs][latestWord][seq] if i > self._xLenDatabase[rowLen][bs][latestWord2][seq][0]] + if len(x)!=0: + next2[seq]=x + x= list(sorted(set(self._xLenDatabase[rowLen][bs][latestWord][seq]) & set( + 
self._xLenDatabase[rowLen][bs][latestWord2][seq]))) + if len(x)!=0: + nextSame[seq]=x + if len(next) >= self._minSup: + nextRow,nextbs = self.makeNextRow(bs, latestWord, latestWord2) + if str(nextRow) not in self._finalPatterns.keys(): + if nextbs not in self._xLenDatabase[rowLen + 1]: + self._xLenDatabase[rowLen+1][nextbs]={} + self._finalPatterns[str(nextRow)] = len(next) + self._xLenDatabase[rowLen + 1][nextbs][latestWord2] ={i:next[i] for i in next } + self.makexLenDatabase(rowLen+1, nextbs, latestWord2) + if len(next2) >= self._minSup: + nextRow,nextbs = self.makeNextRow(bs, latestWord2, latestWord) + if str(nextRow) not in self._finalPatterns.keys(): + if nextbs not in self._xLenDatabase[rowLen + 1]: + self._xLenDatabase[rowLen + 1][nextbs]={} + self._finalPatterns[str(nextRow)] = len(next2) + self._xLenDatabase[rowLen+1][nextbs][latestWord] = {i:next2[i] for i in next2 } + self.makexLenDatabase(rowLen+1, nextbs, latestWord) + if len(nextSame) >= self._minSup: + nextRow,nextbs,nextlate = self.makeNextRowSame3(bs,latestWord,latestWord2) + if str(nextRow) not in self._finalPatterns.keys(): + if nextbs not in self._xLenDatabaseSame[rowLen + 1]: + self._xLenDatabaseSame[rowLen + 1][nextbs]={} + self._finalPatterns[str(nextRow)] = len(nextSame) + self._xLenDatabaseSame[rowLen+1][nextbs][nextlate] = {i:nextSame[i] for i in nextSame} + self.makexLenDatabaseSame(rowLen+1, nextbs, nextlate) + + else: + next= {} + for seq in self._xLenDatabase[rowLen][bs][latestWord2].keys(): + if len(self._xLenDatabase[rowLen][bs][latestWord][seq])>=2: + next[seq]= self._xLenDatabase[rowLen][bs][latestWord][seq][1:] + if len(next) >= self._minSup: + nextRow, nextbs= self.makeNextRow(bs,latestWord,latestWord2) + if str(nextRow) not in self._finalPatterns.keys(): + if nextbs not in self._xLenDatabase[rowLen+1]: + self._xLenDatabase[rowLen+1][nextbs]={} + self._finalPatterns[str(nextRow)] = len(next) + self._xLenDatabase[rowLen+1][nextbs][latestWord2] ={i:next[i] for i in next } + self.makexLenDatabase(rowLen+1, nextbs, latestWord2) + if bs in self._xLenDatabaseSame[rowLen]: + for latestWord2 in self._xLenDatabaseSame[rowLen][bs]: + + + if len(self._xLenDatabase[rowLen][bs][latestWord].keys()) <= len( + self._xLenDatabaseSame[rowLen][bs][latestWord2].keys()): + next = {} + + for seq in self._xLenDatabase[rowLen][bs][latestWord].keys(): + if seq in self._xLenDatabaseSame[rowLen][bs][latestWord2].keys(): + if self._xLenDatabaseSame[rowLen][bs][latestWord2][seq] != []: + x= [i for i in self._xLenDatabase[rowLen][bs][latestWord][seq] if i > self._xLenDatabaseSame[rowLen][bs][latestWord2][seq][0]] + if len(x) != 0: + next[seq] = x + if len(next) >= self._minSup: + + nextRow ,nextbs= self.makeNextRowSame(bs, latestWord2, latestWord) + if str(nextRow) not in self._finalPatterns.keys(): + if nextbs not in self._xLenDatabase[rowLen + 1]: + self._xLenDatabase[rowLen + 1][nextbs] = {} + self._finalPatterns[str(nextRow)] = len(next) + self._xLenDatabase[rowLen + 1][nextbs][latestWord] = {i:next[i] for i in next } + self.makexLenDatabase(rowLen + 1, nextbs, latestWord) + + else: + next = {} + for seq in self._xLenDatabaseSame[rowLen][bs][latestWord2].keys(): + if seq in self._xLenDatabase[rowLen][bs][latestWord].keys(): + if self._xLenDatabaseSame[rowLen][bs][latestWord2][seq] != [] : + x= [i for i in self._xLenDatabase[rowLen][bs][latestWord][seq] if + i > self._xLenDatabaseSame[rowLen][bs][latestWord2][seq][0]] + if len(x) != 0: + next[seq] = x + if len(next) >= self._minSup: + nextRow,nextbs = self.makeNextRowSame(bs, 
latestWord2, latestWord) + if str(nextRow) not in self._finalPatterns.keys(): + if nextbs not in self._xLenDatabase[rowLen + 1]: + self._xLenDatabase[rowLen + 1][nextbs] = {} + self._finalPatterns[str(nextRow)] = len(next) + self._xLenDatabase[rowLen+1][nextbs][latestWord] = {i:next[i] for i in next } + self.makexLenDatabase(rowLen + 1, nextbs,latestWord)
+ + +
+[docs] + def makexLenDatabaseSame(self, rowLen, bs, latestWord): + """ + To make 3 or more length frequent patterns from pattern which the latest word is in different seq by depth-first search technique and update xlenDatabase to sequential database + + :param rowLen: row length of previous patterns. + :param bs : previous patterns without the latest one + :param latestWord : latest word of previous patterns + """ + if rowLen + 1 not in self._xLenDatabase: + self._xLenDatabase[rowLen + 1] = {} + self._xLenDatabaseSame[rowLen + 1] = {} + if bs in self._xLenDatabase[rowLen]: + for latestWord2 in self._xLenDatabase[rowLen][bs]: + if len(self._xLenDatabaseSame[rowLen][bs][latestWord].keys()) <= len(self._xLenDatabase[rowLen][bs][latestWord2].keys()): + next = {} + + for seq in self._xLenDatabaseSame[rowLen][bs][latestWord].keys(): + if seq in self._xLenDatabase[rowLen][bs][latestWord2].keys(): + if self._xLenDatabaseSame[rowLen][bs][latestWord][seq] != []: + x= [i for i in self._xLenDatabase[rowLen][bs][latestWord2][seq] if + i > self._xLenDatabaseSame[rowLen][bs][latestWord][seq][0]] + if len(x) != 0: + next[seq] = x + if len(next) >= self._minSup: + nextRow ,nextbs= self.makeNextRowSame(bs, latestWord, latestWord2) + if str(nextRow) not in self._finalPatterns.keys(): + if nextbs not in self._xLenDatabase[rowLen + 1]: + self._xLenDatabase[rowLen + 1][nextbs] = {} + self._finalPatterns[str(nextRow)] = len(next) + self._xLenDatabase[rowLen + 1][nextbs][latestWord2]= {i:next[i] for i in next} + self.makexLenDatabase(rowLen + 1, nextbs, latestWord2) + + else: + next = {} + + for seq in self._xLenDatabase[rowLen][bs][latestWord2].keys(): + if seq in self._xLenDatabaseSame[rowLen][bs][latestWord].keys(): + if self._xLenDatabaseSame[rowLen][bs][latestWord][seq] != []: + x= [i for i in self._xLenDatabase[rowLen][bs][latestWord2][seq] if + i > self._xLenDatabaseSame[rowLen][bs][latestWord][seq][0]] + if len(x) != 0: + next[seq] = x + if len(next) >= self._minSup: + nextRow,nextbs = self.makeNextRowSame(bs, latestWord, latestWord2) + if str(nextRow) not in self._finalPatterns.keys(): + if nextbs not in self._xLenDatabase[rowLen + 1]: + self._xLenDatabase[rowLen + 1][nextbs] = {} + self._finalPatterns[str(nextRow)] = len(next) + self._xLenDatabase[rowLen + 1][nextbs][latestWord2] = {i:next[i] for i in next} + self.makexLenDatabase(rowLen + 1,nextbs, latestWord2) + if bs in self._xLenDatabaseSame[rowLen]: + for latestWord2 in self._xLenDatabaseSame[rowLen][bs]: + if latestWord2!=latestWord: + if len(self._xLenDatabaseSame[rowLen][bs][latestWord].keys()) <= len( + self._xLenDatabaseSame[rowLen][bs][latestWord2].keys()): + next = {} + + for seq in self._xLenDatabaseSame[rowLen][bs][latestWord].keys(): + if seq in self._xLenDatabaseSame[rowLen][bs][latestWord2].keys(): + x= list(sorted(set(self._xLenDatabaseSame[rowLen][bs][latestWord][seq]) & set( + self._xLenDatabaseSame[rowLen][bs][latestWord2][seq]))) + if len(x) != 0: + next[seq] = x + if len(next) >= self._minSup: + + nextRow, nextbs,nextLate= self.makeNextRowSame2(bs, latestWord, latestWord2) + if str(nextRow) not in self._finalPatterns.keys(): + if nextbs not in self._xLenDatabaseSame[rowLen+1]: + self._xLenDatabaseSame[rowLen + 1][nextbs] = {} + + self._finalPatterns[str(nextRow)] = len(next) + self._xLenDatabaseSame[rowLen + 1][nextbs][nextLate] = {i:next[i] for i in next} + self.makexLenDatabaseSame(rowLen + 1, nextbs, nextLate) + else: + next = {} + + for seq in self._xLenDatabaseSame[rowLen][bs][latestWord2].keys(): + if seq in 
self._xLenDatabaseSame[rowLen][bs][latestWord].keys(): + x= list(sorted(set(self._xLenDatabaseSame[rowLen][bs][latestWord][seq]) & set( + self._xLenDatabaseSame[rowLen][bs][latestWord2][seq]))) + if len(x) != 0: + next[seq] = x + if len(next) >= self._minSup: + + nextRow, nextbs,nextLate= self.makeNextRowSame2(bs, latestWord, latestWord2) + if str(nextRow) not in self._finalPatterns.keys(): + if nextbs not in self._xLenDatabaseSame[rowLen+1]: + self._xLenDatabaseSame[rowLen + 1][nextbs] = {} + + self._finalPatterns[str(nextRow)] = len(next) + self._xLenDatabaseSame[rowLen + 1][nextbs][nextLate] = {i:next[i] for i in next} + self.makexLenDatabaseSame(rowLen + 1, nextbs, nextLate)
+ + +
+[docs] + def makeNextRow(self,bs, latestWord, latestWord2): + """ + To make pattern row when two patterns have the latest word in different sequence + + :param bs : previous pattern without the latest one + :param latestWord : latest word of one previous pattern + :param latestWord2 : latest word of other previous pattern + """ + + bs=bs+(-1,latestWord) + bs2=bs+(-1,latestWord2,-1) + return bs2,bs
+ + +
+[docs]
+    def makeNextRowSame(self, bs, latestWord, latestWord2):
+        """
+        To make the pattern row when one pattern has latestWord in the same itemset as the prefix and the other (latestWord2) in a different itemset
+
+        :param bs : previous pattern without the latest one
+        :param latestWord : latest word of the previous pattern in the same itemset
+        :param latestWord2 : latest word of the other previous pattern in a different itemset
+        """
+
+        bs = list(bs)
+        x2 = [latestWord, ]
+        while bs:
+            x = bs.pop()
+            if x != -1:
+                x2.append(x)
+            else:
+                break
+        x2 = list(sorted(set(x2)))
+        if len(bs) != 0:
+            bs = tuple(bs) + (-1,) + tuple(x2)
+        else:
+            bs = tuple(x2)
+        bs2 = tuple(bs) + (-1, latestWord2, -1)
+        return bs2, bs
+ + + +
+[docs] + def makeNextRowSame2(self,bs, latestWord, latestWord2): + """ + To make pattern row when two patterns have the latest word in same sequence + + :param bs : previous pattern without the latest one + :param latestWord : latest word of one previous pattern + :param latestWord2 : latest word of the other previous pattern + """ + + bs = list(bs) + x = 1 + x2 = [latestWord, latestWord2] + while bs: + x = bs.pop() + if x != -1: + x2.append(x) + else: + break + x2 = list(sorted(set(x2))) + x3 = x2.pop() + if len(bs)!=0: + bs = tuple(bs)+(-1,)+ tuple(x2) + else: + bs = tuple(x2) + bs2 = tuple(bs) + (x3, -1) + + return bs2, bs, x3
+ + + +
+[docs]
+    def makeNextRowSame3(self, bs, latestWord, latestWord2):
+        """
+        To make the pattern row when two patterns extend the same prefix in a new itemset and their two latest words are placed together in that itemset
+
+        :param bs : previous pattern without the latest one
+        :param latestWord : latest word of one previous pattern
+        :param latestWord2 : latest word of the other previous pattern
+        """
+
+        x = list(sorted({latestWord, latestWord2}))
+        x2 = x.pop()
+        x3 = x.pop()
+        bs = bs + (-1, x3)
+        bs2 = bs + (x2,)
+        return bs2, bs, x2
+ + +
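+    # Illustrative pattern encoding used by the makeNextRow* helpers above: patterns are
+    # tuples in which -1 separates itemsets, so ('a', -1, 'b', 'c', -1) denotes the
+    # sequence <(a)(bc)>. makeNextRow places the new item in a new itemset (an
+    # s-extension), while the makeNextRowSame variants merge it into the last itemset
+    # (an i-extension).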
+[docs]
+    @deprecated("It is recommended to use Mine() instead of startMine() for the mining process")
+    def startMine(self):
+        """
+        Frequent pattern mining process will start from here
+        """
+        self._Database = []
+        self._startTime = _ab._time.time()
+        self._creatingItemSets()
+        self._minSup = self._convert(self._minSup)
+        self.make1LenDatabase()
+        self.make2LenDatabase()
+        self.make3LenDatabase()
+        self._endTime = _ab._time.time()
+        process = _ab._psutil.Process(_ab._os.getpid())
+        self._memoryUSS = process.memory_full_info().uss
+        self._memoryRSS = process.memory_info().rss
+        print("Sequential Frequent patterns were generated successfully using SPADE algorithm ")
+ + +
+[docs] + def Mine(self): + """ + Frequent pattern mining process will start from here + """ + self._Database = [] + self._startTime = _ab._time.time() + self._creatingItemSets() + self._minSup = self._convert(self._minSup) + self.make1LenDatabase() + self.make2LenDatabase() + self.make3LenDatabase() + self._endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = float() + self._memoryRSS = float() + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss + print("Sequential Frequent patterns were generated successfully using SPADE algorithm ")
+ + +
+[docs] + def getMemoryUSS(self): + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self): + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self): + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self): + """ + Storing final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataFrame = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a, b]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support']) + return dataFrame
+ + +
+[docs]
+    def save(self, outFile):
+        """
+        Complete set of frequent patterns will be written to an output file
+
+        :param outFile: name of the output file
+        :type outFile: str
+        """
+        self._oFile = outFile
+        with open(self._oFile, 'w+') as writer:
+            for x, y in self._finalPatterns.items():
+                s1 = x + ":" + str(y)
+                writer.write("%s \n" % s1)
+ + +
+[docs] + def getPatterns(self): + """ + Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs]
+    def printResults(self):
+        """
+        This function is used to print the results
+        """
+        print("Total number of Frequent Patterns:", len(self.getPatterns()))
+        print("Total Memory in USS:", self.getMemoryUSS())
+        print("Total Memory in RSS", self.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_ab._sys.argv) == 4 or len(_ab._sys.argv) == 5:
+        if len(_ab._sys.argv) == 5:
+            _ap = SPADE(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4])
+        if len(_ab._sys.argv) == 4:
+            _ap = SPADE(_ab._sys.argv[1], _ab._sys.argv[3])
+        _ap.startMine()
+        _Patterns = _ap.getPatterns()
+        print("Total number of Frequent Patterns:", len(_Patterns))
+        _ap.save(_ab._sys.argv[2])
+        _memUSS = _ap.getMemoryUSS()
+        print("Total Memory in USS:", _memUSS)
+        _memRSS = _ap.getMemoryRSS()
+        print("Total Memory in RSS", _memRSS)
+        _run = _ap.getRuntime()
+        print("Total ExecutionTime in seconds:", _run)
+    else:
+        # _ap = SPADE('text3.txt', 80, '\t')
+        # _ap.startMine()
+        # _Patterns = _ap.getPatterns()
+        # _memUSS = _ap.getMemoryUSS()
+        # print("Total Memory in USS:", _memUSS)
+        # _memRSS = _ap.getMemoryRSS()
+        # print("Total Memory in RSS", _memRSS)
+        # _run = _ap.getRuntime()
+        # print("Total ExecutionTime in seconds:", _run)
+        # print("Total number of Frequent Patterns:", len(_Patterns))
+        print("Error! The number of input parameters do not match the total number of parameters provided")
+
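+# Illustrative note (not part of the PAMI source): each line written by save() above has
+# the form "<pattern tuple>:<support>", e.g. for an assumed toy pattern:
+#
+#     ('a', -1, 'b', -1):3
+#
+# meaning the sequence <(a)(b)> occurs in 3 sequences of the database.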
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/sequentialPatternMining/basic/SPAM.html b/sphinx/_build/html/_modules/PAMI/sequentialPatternMining/basic/SPAM.html new file mode 100644 index 000000000..1d152de51 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/sequentialPatternMining/basic/SPAM.html @@ -0,0 +1,663 @@ + + + + + + PAMI.sequentialPatternMining.basic.SPAM — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
+
+
+
+
+ +

Source code for PAMI.sequentialPatternMining.basic.SPAM

+# SPAM is one of the fundamental algorithms to discover sequential frequent patterns in a transactional database.
+# This program employs the SPAM property (or downward closure property) to reduce the search space effectively.
+# This algorithm employs a depth-first search technique to find the complete set of frequent patterns in a sequential database.
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#
+#             import PAMI.sequentialPatternMining.basic.SPAM as alg
+#
+#             obj = alg.SPAM(iFile, minSup)
+#
+#             obj.startMine()
+#
+#             sequentialPatternMining = obj.getPatterns()
+#
+#             print("Total number of Frequent Patterns:", len(frequentPatterns))
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+__copyright__ = """
+ Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+
+import pandas as pd
+from deprecated import deprecated
+
+from PAMI.sequentialPatternMining.basic import abstract as _ab
+_ab._sys.setrecursionlimit(10000)
+
+
+[docs]
+class SPAM(_ab._sequentialPatterns):
+    """
+    :Description: SPAM is one of the fundamental algorithms to discover sequential frequent patterns in a transactional database.
+                  This program employs the SPAM property (or downward closure property) to reduce the search space effectively.
+                  This algorithm employs a depth-first search technique to find the complete set of frequent patterns in a sequential database.
+
+    :Reference: J. Ayres, J. Gehrke, T. Yiu, and J. Flannick. Sequential Pattern Mining Using Bitmaps. In Proceedings of the Eighth ACM SIGKDD International Conference on Knowledge Discovery and Data Mining. Edmonton, Alberta, Canada, July 2002.
+
+    :param iFile: str :
+                   Name of the Input file to mine the complete set of sequential frequent patterns
+    :param oFile: str :
+                   Name of the output file to store the complete set of sequential frequent patterns
+    :param minSup: float or int or str :
+                   minSup measure constrains the minimum number of transactions in a database where a pattern must appear
+                   Example: minSup=10 will be treated as integer, while minSup=10.0 will be treated as float
+    :param sep: str :
+                   This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator.
+
+    :Attributes:
+
+        iFile : str
+            Input file name or path of the input file
+        oFile : str
+            Name of the output file or the path of the output file
+        minSup : float or int or str
+            The user can specify minSup either in count or proportion of database size.
+            If the program detects the data type of minSup is integer, then it treats minSup as expressed in count.
+            Otherwise, it will be treated as float.
+            Example: minSup=10 will be treated as integer, while minSup=10.0 will be treated as float
+        sep : str
+            This variable is used to distinguish items from one another in a transaction. The default separator is tab space or \t.
+            However, the users can override the default separator.
+        startTime : float
+            To record the start time of the mining process
+        endTime : float
+            To record the completion time of the mining process
+        finalPatterns : dict
+            Storing the complete set of patterns in a dictionary variable
+        memoryUSS : float
+            To store the total amount of USS memory consumed by the program
+        memoryRSS : float
+            To store the total amount of RSS memory consumed by the program
+        Database : list
+            To store the sequences of a database in a list
+        _idDatabase : dict
+            To store the sequences of a database as bitmaps
+        _maxSeqLen :
+            the maximum number of itemsets in any sequence of the database.
+
+    :Methods:
+
+        _creatingItemSets():
+            Storing the complete sequences of the database/input file in a database variable
+        _convert(value):
+            To convert the user specified minSup value
+        make2BitDatabase():
+            To make 1-length frequent patterns by scanning the database and to convert the database into a bitmap database
+        DfsPruning(items, sStep, iStep):
+            The main algorithm of SPAM. This searches the s-step and i-step items of "items", finds the next patterns with their own s-steps and i-steps, and calls this function again on them. Recursion continues until there are no more items available for exploration.
+        Sstep(s):
+            To convert a bitmap to its s-step bitmap. The first time you get 1, you set it to 0 and subsequent ones to 1. (like 010101=>001111, 00001001=>00000111)
+        startMine()
+            Mining process will start from here
+        getPatterns()
+            Complete set of patterns will be retrieved with this function
+        save(oFile)
+            Complete set of frequent patterns will be loaded in to an output file
+        getPatternsAsDataFrame()
+            Complete set of frequent patterns will be loaded in to a dataframe
+        getMemoryUSS()
+            Total amount of USS memory consumed by the mining process will be retrieved from this function
+        getMemoryRSS()
+            Total amount of RSS memory consumed by the mining process will be retrieved from this function
+        getRuntime()
+            Total amount of runtime taken by the mining process will be retrieved from this function
+
+
+    **Executing the code on terminal**:
+    ----------------------------------------
+    .. code-block:: console
+
+
+      Format:
+
+      (.venv) $ python3 SPAM.py <inputFile> <outputFile> <minSup> (<separator>)
+
+      Example usage:
+
+      (.venv) $ python3 SPAM.py sampleDB.txt patterns.txt 10.0
+
+
+    .. note:: minSup can be specified either as an absolute count (integer) or as a proportion of the database size (float)
+
+    **Sample run of the importing code**:
+    -------------------------------------
+    .. code-block:: python
+
+        import PAMI.sequentialPatternMining.basic.SPAM as alg
+
+        obj = alg.SPAM(iFile, minSup)
+
+        obj.startMine()
+
+        sequentialPatternMining = obj.getPatterns()
+
+        print("Total number of Frequent Patterns:", len(sequentialPatternMining))
+
+        obj.save(oFile)
+
+        Df = obj.getPatternsAsDataFrame()
+
+        memUSS = obj.getMemoryUSS()
+
+        print("Total Memory in USS:", memUSS)
+
+        memRSS = obj.getMemoryRSS()
+
+        print("Total Memory in RSS", memRSS)
+
+        run = obj.getRuntime()
+
+        print("Total ExecutionTime in seconds:", run)
+
+    **Credits**:
+    ------------
+        The complete program was written by Shota Suzuki under the supervision of Professor Rage Uday Kiran.
+ """ + + _minSup = float() + _startTime = float() + _endTime = float() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _sep = " " + _memoryUSS = float() + _memoryRSS = float() + _Database = [] + _idDatabase={} + _maxSeqLen=0 + def _creatingItemSets(self): + """ + Storing the complete sequences of the database/input file in a database variable + """ + self._Database = [] + + if isinstance(self._iFile, _ab._pd.DataFrame): + temp = [] + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'Transactions' in i: + temp = self._iFile['Transactions'].tolist() + if "tid" in i: + temp2=self._iFile[''].tolist() + addList=[] + addList.append(temp[0]) + for k in range(len(temp)-1): + if temp2[k]==temp[k+1]: + addList.append(temp[k+1]) + else: + self._Database.append(addList) + addList=[] + addList.append(temp[k+1]) + self._Database.append(addList) + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + temp.pop() + self._Database.append(temp) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line.strip() + temp = [i.rstrip() for i in line.split('-1')] + temp = [x for x in temp if x ] + temp.pop() + + seq = [] + for i in temp: + k = -2 + if len(i)>1: + seq.append(list(sorted(set(i.split())))) + + else: + seq.append(i) + + self._Database.append(seq) + + except IOError: + print("File Not Found") + quit() + + def _convert(self, value): + """ + To convert the user specified minSup value + + :param value: user specified minSup value + :return: converted type + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self._Database) * value) + if type(value) is str: + if '.' in value: + value = float(value) + value = (len(self._Database) * value) + else: + value = int(value) + return value + + +
+[docs] + def make2BitDatabase(self): + """ + To generate the length-1 frequent patterns and convert the database into a vertical bitmap representation, one bitmap per item + """ + self._maxSeqLen = max([len(i) for i in self._Database]) + lineNumber = 0 + idDatabase = {} + for line in self._Database: + seqNumber = 1 + for seq in line: + for data in seq: + if data not in idDatabase: + idDatabase[data] = [] + while lineNumber + 1 != len(idDatabase[data]): + idDatabase[data].append(0) + idDatabase[data][lineNumber] += int(2 ** (self._maxSeqLen - seqNumber)) + seqNumber += 1 + lineNumber += 1 + for key, val in idDatabase.items(): + sup = self.countSup(val) + while lineNumber + 1 != len(idDatabase[key]): + idDatabase[key].append(0) + if sup >= self._minSup: + self._finalPatterns[str(key) + self._sep + "-2"] = sup + self._idDatabase[str(key)] = val
+ + +
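+# Illustration of the vertical bitmap built by make2BitDatabase above,
+# assuming a maximum sequence length of 4: the most significant bit stands
+# for the first itemset, so an item occurring in itemsets 1 and 3 of a
+# sequence is encoded as 1010. The names below are illustrative only.
+maxSeqLen = 4
+positions = [1, 3]                                     # 1-based itemset positions
+bitmap = sum(2 ** (maxSeqLen - p) for p in positions)
+assert format(bitmap, "04b") == "1010"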
+[docs] + def DfsPruning(self,items,sStep,iStep): + """ + The main recursive routine of SPAM. It explores the s-step and i-step extensions of the current pattern, records the frequent ones, and recurses on them until no further extensions are frequent. + + :param items: str + The pattern mined so far + :param sStep: list + Items presumed to be s-step extensions of items (an s-step item appears in a later itemset, as in a-b or a-c) + :param iStep: list + Items presumed to be i-step extensions of items (an i-step item appears in the same itemset, as in ab or ac) + + """ + Snext=[] + Inext=[] + ns = self.Sstep(self._idDatabase[items]) + for i in sStep: + nnext=[] + for k in range(len(self._idDatabase[items])): + nandi=ns[k] & self._idDatabase[i][k] + nnext.append(nandi) + sup=self.countSup(nnext) + if sup>=self._minSup: + key=items+self._sep+"-1"+self._sep+i + self._finalPatterns[key+self._sep+"-1"+self._sep+"-2"]=sup + self._idDatabase[key]=nnext + Snext.append(i) + for i in Snext: + key = items+self._sep+"-1"+self._sep+i + self.DfsPruning(key,Snext,[k for k in Snext if self._Database.index(i)<self._Database.index(k)]) + for i in iStep: + nnext = [] + for k in range(len(self._idDatabase[items])): + nandi = self._idDatabase[items][k] & self._idDatabase[i][k] + nnext.append(nandi) + sup=self.countSup(nnext) + if sup>=self._minSup: + key=items+self._sep+str(i) + self._finalPatterns[key+self._sep+"-1"+self._sep+"-2"]=sup + self._idDatabase[key]=nnext + Inext.append(i) + for i in Inext: + key = items +self._sep +str(i) + self.DfsPruning(key,Snext,[k for k in Inext if self._Database.index(i)<self._Database.index(k)])
+ + +
+[docs] + def Sstep(self,s): + """ + To convert each bitmap to its S-step bitmap: the first (most significant) set bit is cleared and every lower bit is set (like 010101=>001111, 00001001=>00000111). + + :param s: list + the bit sequence of each line + :return: nextS: list + the bit sequences converted by the S-step transform + """ + nextS=[] + for bins in s: + binS=str(bin(bins)) + LenNum=2 + for i in range(len(binS)-2): + if binS[LenNum] == "1": + binS = binS[:LenNum] + "0" + binS[LenNum + 1:] + while len(binS)-1!=LenNum: + LenNum += 1 + binS = binS[:LenNum] + "1" + binS[LenNum + 1:] + break + LenNum+=1 + nextS.append(int(binS, 0)) + return nextS
+ + +
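+# The S-step transform above can also be computed arithmetically: clearing
+# the most significant set bit and setting every lower bit is equivalent to
+# (1 << (x.bit_length() - 1)) - 1 for x > 0. The sketch below also shows the
+# bitwise-AND joins performed in DfsPruning, with assumed toy bitmaps for a
+# single sequence of four itemsets (MSB = first itemset).
+def _sstepExample(x):
+    # 0b010101 -> 0b001111, 0b00001001 -> 0b00000111
+    return (1 << (x.bit_length() - 1)) - 1 if x else 0
+
+assert _sstepExample(0b010101) == 0b001111
+assert _sstepExample(0b00001001) == 0b00000111
+
+a, b = 0b1000, 0b0110         # 'a' in itemset 1; 'b' in itemsets 2 and 3
+assert _sstepExample(a) & b == 0b0110   # s-extension 'a-b' is supported here
+assert a & b == 0b0000                  # i-extension 'ab' is not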
+[docs] + def countSup(self,n): + """ + To count the support of a bitmap list, i.e. the number of lines whose bitmap still contains a set bit. + + :param n: list + the bit sequence of each line + :return: count: int + support of this list + """ + count=0 + for i in n: + if "1" in str(bin(i)): + count+=1 + return count
+ + +
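+# countSup above counts the sequences whose bitmap still has a set bit; for
+# the non-negative bitmaps used here, checking for a "1" in bin(i) is
+# equivalent to i != 0, so this one-liner computes the same value.
+def _countSupExample(bitmaps):
+    return sum(1 for b in bitmaps if b != 0)
+
+assert _countSupExample([0b1010, 0, 0b0001]) == 2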
+[docs] + def startMine(self): + """ + Frequent pattern mining process will start from here + """ + self._Database = [] + self._startTime = _ab._time.time() + self._creatingItemSets() + self._minSup = self._convert(self._minSup) + self.make2BitDatabase() + self._Database = [i for i in self._idDatabase.keys()] + for i in self._Database: + x=[] + for j in self._Database: + if self._Database.index(i)<self._Database.index(j): + x.append(j) + self.DfsPruning(i,self._Database,x) + self._endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss + print("Frequent patterns were generated successfully using SPAM algorithm ")
+ + +
+[docs] + def getMemoryUSS(self): + """Total amount of USS memory consumed by the mining process will be retrieved from this function + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self): + """Total amount of RSS memory consumed by the mining process will be retrieved from this function + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self): + """Calculating the total amount of runtime taken by the mining process + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self): + """Storing final frequent patterns in a dataframe + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataFrame = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a, b]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support']) + return dataFrame
+ + +
+[docs] + def save(self, outFile): + """Complete set of frequent patterns will be loaded into an output file + :param outFile: name of the output file + :type outFile: file + """ + self._oFile = outFile + writer = open(self._oFile, 'w+') + for x, y in self._finalPatterns.items(): + s1 = x + ":" + str(y) + writer.write("%s \n" % s1)
+ + +
+[docs] + def getPatterns(self): + """ Function to send the set of frequent patterns after completion of the mining process + :return: returning frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs] + def printResults(self): + """ + This function is used to print the results + """ + print("Total number of Frequent Patterns:", len(self.getPatterns())) + print("Total Memory in USS:", self.getMemoryUSS()) + print("Total Memory in RSS", self.getMemoryRSS()) + print("Total ExecutionTime in seconds:", self.getRuntime())
+
+ + + +if __name__ == "__main__": + _ap = str() + if len(_ab._sys.argv) == 4 or len(_ab._sys.argv) == 5: + if len(_ab._sys.argv) == 5: + _ap = SPAM(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4]) + if len(_ab._sys.argv) == 4: + _ap = SPAM(_ab._sys.argv[1], _ab._sys.argv[3]) + _ap.startMine() + _Patterns = _ap.getPatterns() + print("Total number of Frequent Patterns:", len(_Patterns)) + _ap.save(_ab._sys.argv[2]) + _memUSS = _ap.getMemoryUSS() + print("Total Memory in USS:", _memUSS) + _memRSS = _ap.getMemoryRSS() + print("Total Memory in RSS", _memRSS) + _run = _ap.getRuntime() + print("Total ExecutionTime in seconds:", _run) + else: + print("Error! The number of input parameters does not match the total number of parameters provided") +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/sequentialPatternMining/basic/prefixSpan.html b/sphinx/_build/html/_modules/PAMI/sequentialPatternMining/basic/prefixSpan.html new file mode 100644 index 000000000..438443aca --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/sequentialPatternMining/basic/prefixSpan.html @@ -0,0 +1,807 @@ + + + + + + PAMI.sequentialPatternMining.basic.prefixSpan — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for PAMI.sequentialPatternMining.basic.prefixSpan

+# Prefix Span is one of the fundamental algorithms to discover sequential frequent patterns in a transactional database.
+# This program employs the Prefix Span property (or downward closure property) to reduce the search space effectively.
+# This algorithm employs a depth-first search technique to find the complete set of frequent patterns in a sequential database.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#
+#             import PAMI.sequentialPatternMining.basic.prefixSpan as alg
+#
+#             obj = alg.prefixSpan(iFile, minSup, oFile, sep)
+#
+#             obj.startMine()
+#
+#             frequentPatterns = obj.getPatterns()
+#
+#             print("Total number of Frequent Patterns:", len(frequentPatterns))
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+__copyright__ = """
+ Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+     Copyright (C)  2021 Rage Uday Kiran
+"""
+
+import pandas as pd
+from deprecated import deprecated
+
+from PAMI.sequentialPatternMining.basic import abstract as _ab
+import copy
+import re
+_ab._sys.setrecursionlimit(10000)
+
+
+[docs] +class prefixSpan(_ab._sequentialPatterns): + """ + :Description: + * Prefix Span is one of the fundamental algorithms to discover sequential frequent patterns in a transactional database. + * This program employs the Prefix Span property (or downward closure property) to reduce the search space effectively. + * This algorithm employs a depth-first search technique to find the complete set of frequent patterns in a sequential database. + + :Reference: J. Pei, J. Han, B. Mortazavi-Asl, J. Wang, H. Pinto, Q. Chen, U. Dayal, M. Hsu: Mining Sequential Patterns by Pattern-Growth: The PrefixSpan Approach. IEEE Trans. Knowl. Data Eng. 16(11): 1424-1440 (2004) + + :param iFile: str : + Name of the Input file to mine complete set of Sequential frequent patterns + :param oFile: str : + Name of the output file to store complete set of Sequential frequent patterns + :param minSup: float or int or str : + minSup measure constraints the minimum number of transactions in a database where a pattern must appear + Example: minSup=10 will be treated as integer, while minSup=10.0 will be treated as float + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override their default separator. + + :Attributes: + + iFile : str + Input file name or path of the input file + oFile : str + Name of the output file or the path of output file + minSup : float or int or str + The user can specify minSup either in count or proportion of database size. + If the program detects the data type of minSup is integer, then it treats minSup as expressed in count. + Otherwise, it will be treated as float. + Example: minSup=10 will be treated as integer, while minSup=10.0 will be treated as float + sep : str + This variable is used to distinguish items from one another in a transaction. The default separator is tab space or \t. + However, the users can override their default separator. + startTime : float + To record the start time of the mining process + endTime : float + To record the completion time of the mining process + finalPatterns : dict + Storing the complete set of patterns in a dictionary variable + memoryUSS : float + To store the total amount of USS memory consumed by the program + memoryRSS : float + To store the total amount of RSS memory consumed by the program + Database : list + To store the transactions of a database in list + + :Methods: + + startMine() + Mining process will start from here + getPatterns() + Complete set of patterns will be retrieved with this function + save(oFile) + Complete set of frequent patterns will be loaded into an output file + getPatternsAsDataFrame() + Complete set of frequent patterns will be loaded into a dataframe + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function + + **Methods to execute code on terminal** + ------------------------------------------ + .. code-block:: console
+ + + Format: + + (.venv) $ python3 prefixSpan.py <inputFile> <outputFile> <minSup> + + Example usage: + + (.venv) $ python3 prefixSpan.py sampleDB.txt patterns.txt 10 + + + .. note:: minSup will be considered in support count or frequency + + + **Importing this algorithm into a python program** + ----------------------------------------------------- + .. code-block:: python + + import PAMI.sequentialPatternMining.basic.prefixSpan as alg + + obj = alg.prefixSpan(iFile, minSup) + + obj.startMine() + + frequentPatterns = obj.getPatterns() + + print("Total number of Frequent Patterns:", len(frequentPatterns)) + + obj.save(oFile) + + Df = obj.getPatternsAsDataFrame() + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + **Credits:** + --------------- + + The complete program was written by Suzuki Shota under the supervision of Professor Rage Uday Kiran. + """ + + _minSup = float() + _startTime = float() + _endTime = float() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _sep = " " + _memoryUSS = float() + _memoryRSS = float() + _Database = [] + _sepDatabase={} + def _creatingItemSets(self): + """ + Storing the complete transactions of the database/input file in a database variable + """ + self._Database = [] + + if isinstance(self._iFile, _ab._pd.DataFrame): + temp = [] + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'Transactions' in i: + temp = self._iFile['Transactions'].tolist() + + for k in temp: + self._Database.append(set(k)) + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line = line.decode("utf-8").strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(set(temp)) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line = line.strip() + temp = [i.rstrip() for i in line.split(':')] + temp = [x for x in temp if x ] + + seq = [] + for i in temp: + if len(i)>1: + for j in list(sorted(set(i.split()))): + seq.append(j) + seq.append(":") + else: + seq.append(i) + seq.append(":") + self._Database.append(seq) + + except IOError: + print("File Not Found") + quit() + + def _convert(self, value): + """ + To convert the user specified minSup value + + :param value: user specified minSup value + :return: converted type + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self._Database) * value) + if type(value) is str: + if '.' in value: + value = float(value) + value = (len(self._Database) * value) + else: + value = int(value) + return value +
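+# For reference, the parser above turns the input line "b a : c :" into the
+# internal form ['a', 'b', ':', 'c', ':']: items inside an itemset are sorted
+# and deduplicated, and ':' closes each itemset. A minimal sketch of the same
+# transformation, shown only for illustration:
+def _parseLineExample(line):
+    seq = []
+    for itemset in [t for t in (p.rstrip() for p in line.split(':')) if t]:
+        seq.extend(sorted(set(itemset.split())))
+        seq.append(':')
+    return seq
+
+assert _parseLineExample('b a : c :') == ['a', 'b', ':', 'c', ':']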
+[docs] + def makeNext(self,sepDatabase,startrow): + """ + To extend startrow with each frequent head word that begins a new itemset, and recurse on the projected database. + + :param sepDatabase: dict + maps each candidate head word to the list of projected suffixes in which it occurs + :param startrow: list + the pattern mined so far + """ + for head in sepDatabase.keys(): + newrow=[i for i in startrow] + if len(sepDatabase[head])>=self._minSup: + if newrow!=[]: + newrow.append(":") + newrow.append(head) + newrow.append(":") + if str(newrow) not in self._finalPatterns: + self._finalPatterns[str(newrow)]=len(sepDatabase[head]) + give = [] + give.append(head) + sepDatabase[head] = self.makeSupDatabase(sepDatabase[head], give) + newrow.pop() + self.makeSeqDatabaseSame(sepDatabase[head], newrow) + elif len(sepDatabase[head]) > self._finalPatterns[str(newrow)]: + self._finalPatterns[str(newrow)] = len(sepDatabase[head]) + give = [] + give.append(head) + sepDatabase[head] = self.makeSupDatabase(sepDatabase[head], give) + newrow.pop() + self.makeSeqDatabaseSame(sepDatabase[head], newrow)
+ + + + +
+[docs] + def makeSupDatabase(self,database,head): + """ + To remove infrequent words from the projected database, keeping the words listed in head (the latest itemset). + + :param database: list + database of lines having the same startrow and head word + :param head: list + words in the latest itemset + :return: the pruned database + """ + supDatabase={} + alreadyInData=[] + newDatabase = [] + for line in database: + alreadyInLine = [] + for data in line: + if data not in alreadyInLine: + if data not in alreadyInData: + supDatabase[data]=1 + alreadyInData.append(data) + else: + supDatabase[data]+=1 + alreadyInLine.append(data) + for line in database: + newLine=[] + for i in line: + if supDatabase[i]>=self._minSup or i in head: + if len(newLine)>1: + if (newLine[-1]!=":" or i!=":"): + newLine.append(i) + else: + newLine.append(i) + newDatabase.append(newLine) + + return newDatabase
+ + +
+[docs] + def makeNextSame(self,sepDatabase,startrow): + """ + To extend startrow with each frequent head word inside its latest itemset, and recurse on the projected database. + + :param sepDatabase: dict + maps each candidate head word to the list of projected suffixes in which it occurs + :param startrow: list + the pattern mined so far + """ + for head in sepDatabase.keys(): + + if len(sepDatabase[head])>=self._minSup: + newrow = startrow.copy() + newrow.append(head) + newrow.append(":") + if str(newrow) not in self._finalPatterns.keys(): + self._finalPatterns[str(newrow)]=len(sepDatabase[head]) + if ":" in startrow: + give = self.getSameSeq(startrow) + else: + give = startrow.copy() + give.append(head) + sepDatabase[head] = self.makeSupDatabase(sepDatabase[head], give) + newrow.pop() + self.makeSeqDatabaseSame(sepDatabase[head], newrow) + elif len(sepDatabase[head])>self._finalPatterns[str(newrow)]: + self._finalPatterns[str(newrow)] = len(sepDatabase[head]) + if ":" in startrow: + give = self.getSameSeq(startrow) + else: + give = startrow.copy() + give.append(head) + sepDatabase[head] = self.makeSupDatabase(sepDatabase[head], give) + newrow.pop() + self.makeSeqDatabaseSame(sepDatabase[head], newrow)
+ +
+[docs] + def makeSeqDatabaseFirst(self,database): + """ + To build the projected databases for length-1 patterns: for each distinct word, the suffix following its first occurrence in each line is collected (at most one suffix per line). + + :param database: list + the transactions of the database + """ + startrow=[] + seqDatabase={} + + for line in database: + alreadyInLine=[] + for data in range(len(line)): + if line[data] not in alreadyInLine and line[data]!=":": + if line[data] not in seqDatabase.keys(): + seqDatabase[line[data]]=[] + seqDatabase[line[data]].append(line[data+1:]) + alreadyInLine.append(line[data]) + else: + seqDatabase[line[data]].append(line[data+1:]) + alreadyInLine.append(line[data]) + + if len(seqDatabase)>0: + self.makeNext(seqDatabase,startrow)
+ + +
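+# Concretely, the first projection step maps each distinct word to the
+# suffixes following its first occurrence in every line (at most one suffix
+# per line). An illustrative sketch over the internal list form:
+def _projectFirstExample(database):
+    seqDatabase = {}
+    for line in database:
+        seen = []
+        for i, w in enumerate(line):
+            if w != ':' and w not in seen:
+                seqDatabase.setdefault(w, []).append(line[i + 1:])
+                seen.append(w)
+    return seqDatabase
+
+_db = [['a', 'b', ':', 'c', ':'], ['a', ':', 'c', ':']]
+assert _projectFirstExample(_db)['a'] == [['b', ':', 'c', ':'], [':', 'c', ':']]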
+[docs] + def serchSame(self,database,startrow,give): + """ + To collect patterns of length 2 or more whose last two words lie in the same itemset. + + :param database: list + the lines of the projected database that share the same startrow and head word + :param startrow: list + the pattern mined so far + :param give: list + the words in the latest itemset of startrow + """ + sepDatabaseSame={} + sepDatabaseSame[startrow[-1]]=[] + for line in database: + addLine=0 + i=0 + if len(line)>1: + while line[i]!=":": + if line[i]==startrow[-1]: + sepDatabaseSame[startrow[-1]].append(line[i+1:]) + addLine=1 + break + i+=1 + if addLine!=1: + ok=[] + while i <len(line): + if line[i]==":": + ok=[] + elif line[i]==startrow[-1]: + ok.append("sk1") + for x in give: + if x==line[i]: + ok.append(x) + if len(ok)==1+len(give): + sepDatabaseSame[startrow[-1]].append(line[i+1:]) + break + i+=1 + startrow2=[startrow[0]] + startrow.append(":") + if str(startrow) not in self._finalPatterns.keys(): + self.makeNextSame(sepDatabaseSame,startrow2) + elif self._finalPatterns[str(startrow)]<len(sepDatabaseSame[startrow[-2]]): + self.makeNextSame(sepDatabaseSame,startrow2) + return sepDatabaseSame[startrow[-2]]
+ + +
+[docs] + def getSameSeq(self,startrow): + """ + To get the words in the latest itemset of startrow. + + :param startrow: the pattern mined so far + + """ + give = [] + newrow = startrow.copy() + while newrow[-1] != ":": + y = newrow.pop() + give.append(y) + return give
+ + +
+[docs] + def makeSeqDatabaseSame(self,database,startrow): + """ + To build the projected databases that start from the same head word, collecting at most one suffix per line. The suffixes are separated according to whether the head occurs in the latest itemset of startrow or in a later itemset. + + :param database: list + the transactions of the projected database + :param startrow: the pattern mined so far + + """ + seqDatabase={} + seqDatabaseSame={} + for line in database: + if len(line)>1: + alreadyInLine=[] + i = 0 + while line[i] != ":": + if line[i] not in seqDatabaseSame: + if ":" in startrow: + give=self.getSameSeq(startrow) + else: + give=startrow.copy() + newrow= [startrow[-1], line[i]] + seqDatabaseSame[line[i]] = self.serchSame(database, newrow,give) + + i += 1 + same=0 + while len(line)>i: + if line[i]!=":": + if line[i] not in alreadyInLine: + if line[i] not in seqDatabase: + seqDatabase[line[i]]=[] + seqDatabase[line[i]].append(line[i + 1:]) + alreadyInLine.append(line[i]) + if line[i]==startrow[-1]: + same=1 + + elif same==1 and line[i] not in seqDatabaseSame: + if ":" in startrow: + give=self.getSameSeq(startrow) + else: + give=startrow.copy() + newrow= [startrow[-1], line[i]] + seqDatabaseSame[line[i]] = self.serchSame(database, newrow,give) + + else: + same=0 + i+=1 + + if len(seqDatabase)!=0: + self.makeNext(seqDatabase,startrow) + if len(seqDatabaseSame)!=0: + self.makeNextSame(seqDatabaseSame,startrow)
+ + +
+[docs] + @deprecated("It is recommended to use mine() instead of startMine() for mining process") + def startMine(self): + """ + Frequent pattern mining process will start from here + """ + self._Database = [] + self._startTime = _ab._time.time() + self._creatingItemSets() + self._Database=self.makeSupDatabase(self._Database,"") + self._minSup = self._convert(self._minSup) + self.makeSeqDatabaseFirst(self._Database) + self._endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = float() + self._memoryRSS = float() + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss + print("Frequent patterns were generated successfully using prefixSpan algorithm ")
+ + +
+[docs] + def mine(self): + """ + Frequent pattern mining process will start from here + """ + self._Database = [] + self._startTime = _ab._time.time() + self._creatingItemSets() + self._Database=self.makeSupDatabase(self._Database,"") + self._minSup = self._convert(self._minSup) + self.makeSeqDatabaseFirst(self._Database) + self._endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = float() + self._memoryRSS = float() + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss + print("Frequent patterns were generated successfully using prefixSpan algorithm ")
+ + +
+[docs] + def getMemoryUSS(self): + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self): + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self): + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self): + """ + Storing final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataFrame = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a, b]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support']) + return dataFrame
+ + +
+[docs] + def save(self, outFile): + """ + Complete set of frequent patterns will be loaded into an output file + + :param outFile: name of the output file + :type outFile: csv file + """ + self._oFile = outFile + writer = open(self._oFile, 'w+') + for x, y in self._finalPatterns.items(): + pattern="" + x=re.sub("[\['\]]","",x) + for i in x.split(","): + pattern=pattern+"\t"+str(i) + s1 = pattern + ":" + str(y) + writer.write("%s \n" % s1)
+ + +
+[docs] + def getPatterns(self): + """ + Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs] + def printResults(self): + """ + This function is used to print the results + """ + print("Total number of Frequent Patterns:", len(self.getPatterns())) + print("Total Memory in USS:", self.getMemoryUSS()) + print("Total Memory in RSS", self.getMemoryRSS()) + print("Total ExecutionTime in seconds:", self.getRuntime())
+
+ + + +if __name__ == "__main__": + _ap = str() + if len(_ab._sys.argv) == 4 or len(_ab._sys.argv) == 5: + if len(_ab._sys.argv) == 5: + _ap = prefixSpan(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4]) + if len(_ab._sys.argv) == 4: + _ap = prefixSpan(_ab._sys.argv[1], _ab._sys.argv[3]) + _ap.startMine() + _Patterns = _ap.getPatterns() + print("Total number of Frequent Patterns:", len(_Patterns)) + _ap.save(_ab._sys.argv[2]) + _memUSS = _ap.getMemoryUSS() + print("Total Memory in USS:", _memUSS) + _memRSS = _ap.getMemoryRSS() + print("Total Memory in RSS", _memRSS) + _run = _ap.getRuntime() + print("Total ExecutionTime in seconds:", _run) + else: + print("Error! The number of input parameters does not match the total number of parameters provided") +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/stablePeriodicFrequentPattern/basic/SPPEclat.html b/sphinx/_build/html/_modules/PAMI/stablePeriodicFrequentPattern/basic/SPPEclat.html new file mode 100644 index 000000000..247fc1ec6 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/stablePeriodicFrequentPattern/basic/SPPEclat.html @@ -0,0 +1,598 @@ + + + + + + PAMI.stablePeriodicFrequentPattern.basic.SPPEclat — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for PAMI.stablePeriodicFrequentPattern.basic.SPPEclat

+# Stable periodic pattern mining aims to discover all interesting patterns in a temporal database using three constraints: minimum support,
+# maximum period and maximum lability. It finds the patterns whose support is no less than the user-specified minimum support and whose
+# lability is no greater than the maximum lability.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#
+#             from PAMI.stablePeriodicFrequentPattern.basic import SPPEclat as alg
+#
+#             obj = alg.SPPEclat("../basic/sampleTDB.txt", 5, 3, 3)
+#
+#             obj.startMine()
+#
+#             Patterns = obj.getPatterns()
+#
+#             print("Total number of Stable Periodic Frequent Patterns:", len(Patterns))
+#
+#             obj.save("patterns")
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+__copyright__ = """
+ Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+     Copyright (C)  2021 Rage Uday Kiran
+
+"""
+import pandas as pd
+from deprecated import deprecated
+
+from PAMI.stablePeriodicFrequentPattern.basic import abstract as _ab
+
+
+[docs] +class SPPEclat(_ab._stablePeriodicFrequentPatterns): + """ + :Description: Stable periodic pattern mining aims to discover all interesting patterns in a temporal database using three constraints: minimum support, + maximum period and maximum lability. It finds the patterns whose support is no less than the user-specified minimum support and whose + lability is no greater than the maximum lability. + + :Reference: Fournier-Viger, P., Yang, P., Lin, J. C.-W., Kiran, U. (2019). Discovering Stable Periodic-Frequent Patterns in Transactional Data. Proc. + 32nd Intern. Conf. on Industrial, Engineering and Other Applications of Applied Intelligent Systems (IEA AIE 2019), Springer LNAI, pp. 230-244 + + :param iFile: str : + Name of the Input file to mine complete set of stable periodic Frequent Pattern. + :param oFile: str : + Name of the output file to store complete set of stable periodic Frequent Pattern. + :param minSup: float or int or str : + The user can specify minSup either in count or proportion of database size. + If the program detects the data type of minSup is integer, then it treats minSup as expressed in count. + Otherwise, it will be treated as float. + Example: minSup=10 will be treated as integer, while minSup=10.0 will be treated as float + :param maxPer: int or float or str : + maximum period of a pattern + :param maxLa: int or float or str : + maximum lability of a pattern + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override their default separator. + + + :Attributes: + + iFile : file + Name of the Input file or path of the input file + oFile : file + Name of the output file or path of the output file + minSup : int or float or str + The user can specify minSup either in count or proportion of database size. + If the program detects the data type of minSup is integer, then it treats minSup as expressed in count. + Otherwise, it will be treated as float. + Example: minSup=10 will be treated as integer, while minSup=10.0 will be treated as float + maxPer : int or float or str + The user can specify maxPer either in count or proportion of database size. + If the program detects the data type of maxPer is integer, then it treats maxPer as expressed in count. + Otherwise, it will be treated as float. + Example: maxPer=10 will be treated as integer, while maxPer=10.0 will be treated as float + maxLa : int or float or str + The user can specify maxLa either in count or proportion of database size. + If the program detects the data type of maxLa is integer, then it treats maxLa as expressed in count. + Otherwise, it will be treated as float. + Example: maxLa=10 will be treated as integer, while maxLa=10.0 will be treated as float + sep : str + This variable is used to distinguish items from one another in a transaction. The default separator is tab space or \t. + However, the users can override their default separator. 
+ memoryUSS : float + To store the total amount of USS memory consumed by the program + memoryRSS : float + To store the total amount of RSS memory consumed by the program + startTime : float + To record the start time of the mining process + endTime : float + To record the completion time of the mining process + Database : list + To store the transactions of a database in list + mapSupport : Dictionary + To maintain the information of item and their frequency + lno : int + it represents the total no of transactions + tree : class + it represents the Tree class + itemSetCount : int + it represents the total no of patterns + finalPatterns : dict + it stores the complete set of patterns + tidList : dict + stores the timestamps of an item + + :Methods: + + startMine() + Mining process will start from here + getPatterns() + Complete set of patterns will be retrieved with this function + save(oFile) + Complete set of periodic-frequent patterns will be loaded into an output file + getPatternsAsDataFrame() + Complete set of periodic-frequent patterns will be loaded into a dataframe + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function + creatingItemSets() + Scan the database and store the items with their timestamps which are periodic frequent + calculateLa() + Calculates the lability of a pattern from its list of timestamps. + Generation() + Used to implement the prefix equivalence class method to generate the stable periodic patterns recursively + + + + **Methods to execute code on terminal** + ----------------------------------------- + .. code-block:: console + + + Format: + + (.venv) $ python3 SPPEclat.py <inputFile> <outputFile> <minSup> <maxPer> <maxLa> + + Example usage: + + (.venv) $ python3 SPPEclat.py sampleDB.txt patterns.txt 10.0 4.0 2.0 + + + .. note:: constraints will be considered in percentage of database transactions + + **Importing this algorithm into a python program** + --------------------------------------------------- + .. code-block:: python + + from PAMI.stablePeriodicFrequentPattern.basic import SPPEclat as alg + + obj = alg.SPPEclat("../basic/sampleTDB.txt", 5, 3, 3) + + obj.startMine() + + Patterns = obj.getPatterns() + + print("Total number of Stable Periodic Frequent Patterns:", len(Patterns)) + + obj.save("patterns") + + Df = obj.getPatternsAsDataFrame() + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + **Credits:** + -------------- + The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran. 
+ + """ + _iFile = " " + _oFile = " " + _minSup = str() + _maxPer = str() + _maxLa = float() + _sep = " " + _SPPList = {} + _itemList = [] + _last = int() + _finalPatterns = {} + _tsList = {} + _startTime = float() + _endTime = float() + _memoryUSS = float() + _memoryRSS = float() + _Database = [] + + def __init__(self, inputFile, minSup, maxPer, maxLa, sep='\t'): + self._iFile = inputFile + self._minSup = minSup + self._maxPer = maxPer + self._maxLa = maxLa + self._sep = sep + + def _creatingItemsets(self): + """ + Storing the complete transactions of the database/input file in a database variable + """ + self._Database = [] + if isinstance(self._iFile, _ab._pd.DataFrame): + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'Transactions' in i: + self._Database = self._iFile['Transactions'].tolist() + if 'Patterns' in i: + self._Database = self._iFile['Patterns'].tolist() + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line.strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + except IOError: + print("File Not Found") + quit() + + def _convert(self, value): + """ + to convert the type of user specified minSup value + + :param value: user specified minSup value + :return: converted type + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self._Database) * value) + if type(value) is str: + if '.' 
in value: + value = float(value) + value = (len(self._Database) * value) + else: + value = int(value) + return value + + def _createSPPList(self): + """ + to build the list of length-1 stable periodic patterns + """ + tidLast = {} + la = {} + self._SPPList = {} + self._tsList = {} + for transaction in self._Database: + ts = int(transaction[0]) + for item in transaction[1:]: + if item not in self._SPPList: + la[item] = max(0, ts - self._maxPer) + self._SPPList[item] = [1, la[item]] + self._tsList[item] = [ts] + else: + s = self._SPPList[item][0] + 1 + la[item] = max(0, la[item] + ts - tidLast.get(item) - self._maxPer) + self._SPPList[item] = [s, max(la[item], self._SPPList[item][1])] + self._tsList[item].append(ts) + tidLast[item] = ts + self._last = ts + for item in self._SPPList: + la[item] = max(0, la[item] + self._last - tidLast[item] - self._maxPer) + self._SPPList[item][1] = max(la[item], self._SPPList[item][1]) + self._SPPList = {k: v for k, v in self._SPPList.items() if v[0] >= self._minSup and v[1] <= self._maxLa} + self._SPPList = {k: v for k, v in sorted(self._SPPList.items(), key=lambda x: x[1][0], reverse=True)} + self._Generation(list(self._SPPList), set()) + + def _Generation(self, GPPFList, CP): + """ + To generate the patterns using depth-first search + """ + for i in range(len(GPPFList)): + item = GPPFList[i] + CP1 = CP | {item} + if CP != set(): + self._tsList['\t'.join(CP1)] = list(set(self._tsList['\t'.join(CP)]) & set(self._tsList[item])) + la = self._calculateLa(self._tsList['\t'.join(CP1)]) + support = len(self._tsList['\t'.join(CP1)]) + if la <= self._maxLa and len(self._tsList['\t'.join(CP1)]) >= self._minSup: + self._finalPatterns['\t'.join(CP1)] = [support, la] + if i+1 < len(GPPFList): + self._Generation(GPPFList[i+1:], CP1) + + def _calculateLa(self, tsList): + """ + To calculate the lability of a pattern based on its timestamps + """ + previous = 0 + la = 0 + tsList = sorted(tsList) + laList = [] + for ts in tsList: + la = max(0, la + ts - previous - self._maxPer) + laList.append(la) + previous = ts + + la = max(0, la + self._last - previous - self._maxPer) + laList.append(la) + maxla = max(laList) + return maxla + +
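+# A worked example of the lability recurrence in _calculateLa above: with
+# maxPer = 3, timestamps [1, 2, 6, 7], and a last database timestamp of 10
+# (assumed values), only the 2 -> 6 gap exceeds maxPer, so the maximum
+# lability is 1. A standalone sketch of the same computation:
+def _calculateLaExample(tsList, maxPer, last):
+    previous, la, laList = 0, 0, []
+    for ts in sorted(tsList):
+        la = max(0, la + ts - previous - maxPer)   # gaps beyond maxPer accumulate
+        laList.append(la)
+        previous = ts
+    laList.append(max(0, la + last - previous - maxPer))
+    return max(laList)
+
+assert _calculateLaExample([1, 2, 6, 7], maxPer=3, last=10) == 1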
+[docs] + @deprecated("It is recommended to use mine() instead of startMine() for mining process") + def startMine(self): + """ + Method to start the mining of patterns + """ + self.mine()
+ + +
+[docs] + def mine(self): + """ + Method to start the mining of patterns + """ + self._startTime = _ab._time.time() + self._creatingItemsets() + self._minSup = self._convert(self._minSup) + self._maxPer = self._convert(self._maxPer) + self._maxLa = self._convert(self._maxLa) + self._finalPatterns = {} + self._createSPPList() + self._endTime = _ab._time.time() + self._memoryUSS = float() + self._memoryRSS = float() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss + print("Stable Periodic Frequent patterns were generated successfully using SPPEclat algorithm ")
+ + + +
+[docs] + def getRuntime(self): + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + return self._endTime - self._startTime
+ + +
+[docs] + def getPatterns(self): + """ + Function to return the set of stable periodic-frequent patterns after completion of the mining process + + :return: returning stable periodic-frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs] + def getMemoryUSS(self): + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def save(self, outFile): + """ + Complete set of periodic-frequent patterns will be loaded in to an output file + + :param outFile: name of the output file + :type outFile: csv file + """ + self._oFile = outFile + writer = open(self._oFile, 'w+') + for x, y in self._finalPatterns.items(): + s1 = x.strip() + ":" + str(y[0]) + ":" + str(y[1]) + writer.write("%s \n" % s1)
+ + +
+[docs] + def getPatternsAsDataFrame(self): + """ + Storing final periodic-frequent patterns in a dataframe + + :return: returning periodic-frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataFrame = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a.replace('\t', ' '), b[0], b[1]]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support', 'Lability']) + return dataFrame
+ + +
+[docs] + def getMemoryRSS(self): + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._memoryRSS
+ + +
+[docs] + def printResults(self): + """ + This function is used to print the results + """ + print("Total number of Stable Periodic Patterns:", len(self.getPatterns())) + print("Total Memory in USS:", self.getMemoryUSS()) + print("Total Memory in RSS", self.getMemoryRSS()) + print("Total ExecutionTime in seconds:", self.getRuntime())
+
+ + +if __name__ == '__main__': + _ap = str() + if len(_ab._sys.argv) == 6 or len(_ab._sys.argv) == 7: + if len(_ab._sys.argv) == 7: + _ap = SPPEclat(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5], _ab._sys.argv[6]) + if len(_ab._sys.argv) == 6: + _ap = SPPEclat(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5]) + _ap.mine() + print("Total number of Patterns:", len(_ap.getPatterns())) + _ap.save(_ab._sys.argv[2]) + print("Total Memory in USS:", _ap.getMemoryUSS()) + print("Total Memory in RSS", _ap.getMemoryRSS()) + print("Total ExecutionTime in seconds:", _ap.getRuntime()) + else: + print("Error! The number of input parameters does not match the total number of parameters provided") +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/stablePeriodicFrequentPattern/topK/TSPIN.html b/sphinx/_build/html/_modules/PAMI/stablePeriodicFrequentPattern/topK/TSPIN.html new file mode 100644 index 000000000..10510da31 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/stablePeriodicFrequentPattern/topK/TSPIN.html @@ -0,0 +1,882 @@ + + + + + + PAMI.stablePeriodicFrequentPattern.topK.TSPIN — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

Source code for PAMI.stablePeriodicFrequentPattern.topK.TSPIN

+# TSPIN is an algorithm to discover top stable periodic-frequent patterns in a transactional database.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#
+#     from PAMI.stablePeriodicFrequentPattern.topK import TSPIN as alg
+#
+#     obj = alg.TSPIN(iFile, maxPer, maxLa, k)
+#
+#     obj.startMine()
+#
+#     stablePeriodicFrequentPatterns = obj.getPatterns()
+#
+#     print("Total number of Periodic Frequent Patterns:", len(stablePeriodicFrequentPatterns))
+#
+#     obj.save(oFile)
+#
+#     Df = obj.getPatternsAsDataFrame()
+#
+#     memUSS = obj.getMemoryUSS()
+#
+#     print("Total Memory in USS:", memUSS)
+#
+#     memRSS = obj.getMemoryRSS()
+#
+#     print("Total Memory in RSS", memRSS)
+#
+#     run = obj.getRuntime()
+#
+#     print("Total ExecutionTime in seconds:", run)
+
+
+
+__copyright__ = """
+ Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+     Copyright (C)  2021 Rage Uday Kiran
+
+"""
+
+from PAMI.stablePeriodicFrequentPattern.topK import abstract as _ab
+from typing import List, Dict, Tuple, Set, Union, Any, Generator
+
+
+_maxPer = float()
+_maxLa = float()
+_k = float()
+_lno = int()
+_last = int()
+
+
+class _Node(object):
+    """
+        A class used to represent the node of stablePeriodicFrequentPatternTree
+
+        :Attributes:
+
+            item : int or None
+                Storing item of a node
+            timeStamps : list
+                To maintain the timestamps of a database at the end of the branch
+            parent : node
+                To maintain the parent of every node
+            children : list
+                To maintain the children of a node
+
+        :Methods:
+
+            addChild(itemName)
+                Storing the children to their respective parent nodes
+        """
+
+    def __init__(self, item, children) -> None:
+        """
+        Initializing the Node class
+
+        :param item: Storing the item of a node
+        :type item: int or None
+        :param children: To maintain the children of a node
+        :type children: dict
+        """
+
+        self.item = item
+        self.children = children
+        self.parent = None
+        self.timeStamps = []
+
+    def addChild(self, node) -> None:
+        """
+        To add the children to a node
+
+        :param node: parent node in the tree
+        """
+
+        self.children[node.item] = node
+        node.parent = self
+
+
+class _Tree(object):
+    """
+    A class used to represent the stablePeriodic frequentPatternGrowth tree structure
+
+    :Attributes:
+
+        root : Node
+            Represents the root node of the tree
+        summaries : dictionary
+            Storing the nodes with same item name
+        info : dictionary
+            Stores the support of the items
+
+
+    :Methods:
+
+        addTransactions(Database)
+            Creating transaction as a branch in frequentPatternTree
+        getConditionalPatterns(Node)
+            Generates the conditional patterns from tree for specific node
+        conditionalTransaction(prefixPaths,Support)
+            Takes the prefixPath of a node and support at child of the path and extract the frequent patterns from
+            prefixPaths and generates prefixPaths with items which are frequent
+        remove(Node)
+            Removes the node from tree once after generating all the patterns respective to the node
+        generatePatterns(Node)
+            Starts from the root node of the tree and mines the periodic-frequent patterns
+        """
+
+    def __init__(self) -> None:
+        self.root = _Node(None, {})
+        self.summaries = {}
+        self.info = {}
+
+    def addTransaction(self, transaction, tid) -> None:
+        """
+        Adding a transaction into tree
+
+        :param transaction: To represent the complete database
+        :type transaction: list
+        :param tid: To represent the timestamp of a database
+        :type tid: list
+        :return: pfp-growth tree
+        """
+
+        currentNode = self.root
+        for i in range(len(transaction)):
+            if transaction[i] not in currentNode.children:
+                newNode = _Node(transaction[i], {})
+                currentNode.addChild(newNode)
+                if transaction[i] in self.summaries:
+                    self.summaries[transaction[i]].append(newNode)
+                else:
+                    self.summaries[transaction[i]] = [newNode]
+                currentNode = newNode
+            else:
+                currentNode = currentNode.children[transaction[i]]
+        currentNode.timeStamps = currentNode.timeStamps + tid
+
+    def getConditionalPatterns(self, alpha) -> tuple:
+        """
+        Generates all the conditional patterns of a respective node
+
+        :param alpha: To represent a Node in the tree
+        :type alpha: Node
+        :return: A tuple consisting of finalPatterns, conditional pattern base and information
+        """
+        finalPatterns = []
+        finalSets = []
+        for i in self.summaries[alpha]:
+            set1 = i.timeStamps
+            set2 = []
+            while i.parent.item is not None:
+                set2.append(i.parent.item)
+                i = i.parent
+            if len(set2) > 0:
+                set2.reverse()
+                finalPatterns.append(set2)
+                finalSets.append(set1)
+        finalPatterns, finalSets, info = self.conditionalDatabases(finalPatterns, finalSets)
+        return finalPatterns, finalSets, info
+
+    @staticmethod
+    def generateTimeStamps(node) -> list:
+        """
+        To get the timestamps of a node
+
+        :param node: A node in the tree
+        :return: Timestamps of a node
+        """
+
+        finalTimeStamps = node.timeStamps
+        return finalTimeStamps
+
+    def removeNode(self, nodeValue) -> None:
+        """ Removing the node from tree
+
+        :param nodeValue: To represent a node in the tree
+        :type nodeValue: node
+        :return: None; the timestamps of the removed nodes are merged into their parents
+        """
+
+        for i in self.summaries[nodeValue]:
+            i.parent.timeStamps = i.parent.timeStamps + i.timeStamps
+            del i.parent.children[nodeValue]
+
+    def getTimeStamps(self, alpha) -> list:
+        """
+        To get all the timestamps of the nodes which share same item name
+
+        :param alpha: Node in a tree
+        :return: Timestamps of a  node
+        """
+        temporary = []
+        for i in self.summaries[alpha]:
+            temporary += i.timeStamps
+        return temporary
+
+    @staticmethod
+    def getSupportAndPeriod(timeStamps) -> tuple:
+        """
+        To calculate the support and lability
+
+        :param timeStamps: Timestamps of an item set
+        :return: support, lability
+        """
+
+        global _maxPer, _last
+        previous = 0
+        la = 0
+        tsList = sorted(timeStamps)
+        for ts in tsList:
+            la = max(0, la + ts - previous - _maxPer)
+            previous = ts
+        la = max(0, la + _last - previous - _maxPer)
+        return len(timeStamps), la
+
+    def conditionalDatabases(self, conditionalPatterns, conditionalTimeStamps) -> tuple:
+        """
+        It generates the conditional patterns with periodic-frequent items
+
+        :param conditionalPatterns: conditionalPatterns generated from conditionPattern method of a respective node
+        :type conditionalPatterns: list
+        :param conditionalTimeStamps: Represents the timestamps of a conditional patterns of a node
+        :type conditionalTimeStamps: list
+        :returns: Returns conditional transactions by removing non-periodic and non-frequent items
+        """
+
+        global _maxPer, _minSup
+        pat = []
+        timeStamps = []
+        data1 = {}
+        for i in range(len(conditionalPatterns)):
+            for j in conditionalPatterns[i]:
+                if j in data1:
+                    data1[j] = data1[j] + conditionalTimeStamps[i]
+                else:
+                    data1[j] = conditionalTimeStamps[i]
+        updatedDictionary = {}
+        for m in data1:
+            updatedDictionary[m] = self.getSupportAndPeriod(data1[m])
+        updatedDictionary = {k: v for k, v in updatedDictionary.items() if v[1] <= _maxLa}
+        count = 0
+        for p in conditionalPatterns:
+            p1 = [v for v in p if v in updatedDictionary]
+            trans = sorted(p1, key=lambda x: (updatedDictionary.get(x)[0], -x), reverse=True)
+            if len(trans) > 0:
+                pat.append(trans)
+                timeStamps.append(conditionalTimeStamps[count])
+            count += 1
+        return pat, timeStamps, updatedDictionary
+
+    def generatePatterns(self, minSup, prefix, Qk) -> None:
+        """
+        Generates the patterns
+
+        :param minSup: the current minimum support threshold
+        :type minSup: int or float
+        :param prefix: Forms the combination of items
+        :type prefix: list
+        :param Qk: stores the top-k patterns discovered so far with their support and lability
+        :type Qk: dict
+        """
+
+        global _k
+        for i in sorted(self.summaries, key=lambda x: (self.info.get(x)[0], -x)):
+            pattern = prefix[:]
+            pattern.append(i)
+            Qk[tuple(pattern)] = self.info[i]
+            if len(Qk) >= _k:
+                minSup = min([v[0] for v in Qk.values()])
+            if len(Qk) > _k:
+                temp = min([v[0] for v in Qk.values()])
+                res = [key for key in Qk if Qk[key] == temp]
+                for j in res:
+                    Qk[j] = None
+            patterns, timeStamps, info = self.getConditionalPatterns(i)
+            conditionalTree = _Tree()
+            conditionalTree.info = info.copy()
+            for pat in range(len(patterns)):
+                conditionalTree.addTransaction(patterns[pat], timeStamps[pat])
+            if len(patterns) > 0:
+                conditionalTree.generatePatterns(minSup, pattern, Qk)
+            self.removeNode(i)
+
+
+
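+# A standalone sketch (added for illustration; it mirrors the top-k maintenance in
+# _Tree.generatePatterns above and is not part of the original module). Qk buffers
+# the current top-k patterns: once it holds k entries, minSup is raised to the
+# weakest buffered support, and overflow entries at that support are evicted.
+def _topKPushSketch(Qk, pattern, info, k, minSup):
+    Qk[tuple(pattern)] = info                      # info is a (support, lability) pair
+    if len(Qk) >= k:
+        minSup = min(v[0] for v in Qk.values())    # raise the mining threshold
+    if len(Qk) > k:
+        weakest = min(v[0] for v in Qk.values())
+        for key in [key for key in Qk if Qk[key][0] == weakest]:
+            del Qk[key]                            # evict the weakest pattern(s)
+    return minSup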
+[docs]
+class TSPIN(_ab._stablePeriodicFrequentPatterns):
+    """
+    :Description: TSPIN is an algorithm to discover the top-k stable periodic-frequent patterns in a transactional database.
+
+    :Reference: Fournier-Viger, P., Wang, Y., Yang, P. et al. TSPIN: mining top-k stable periodic patterns.
+                Appl Intell 52, 6917–6938 (2022). https://doi.org/10.1007/s10489-020-02181-6
+
+    :param iFile: str :
+        Name of the input file to mine the complete set of stable periodic-frequent patterns
+    :param oFile: str :
+        Name of the output file to store the complete set of stable periodic-frequent patterns
+    :param maxPer: float :
+        Maximum periodicity allowed between two consecutive occurrences of a pattern. It can be specified as a count or as a proportion of the database size.
+    :param maxLa: float :
+        Maximum lability allowed for a pattern. It can be specified as a count or as a proportion of the database size.
+    :param k: int :
+        The number of top stable periodic-frequent patterns to be discovered.
+
+    :param sep: str :
+        This variable is used to distinguish items from one another in a transaction. The default separator is the tab space. However, the users can override the default separator.
+
+    :Attributes:
+
+        iFile : file
+            Name of the Input file or path of the input file
+        oFile : file
+            Name of the output file or path of the output file
+        maxPer : int or float or str
+            The user can specify maxPer either in count or proportion of database size.
+            If the program detects the data type of maxPer is integer, then it treats maxPer as expressed in count.
+            Otherwise, it will be treated as float.
+            Example: maxPer=10 will be treated as integer, while maxPer=10.0 will be treated as float
+        maxLa : int or float or str
+            The user can specify maxLa either in count or proportion of database size.
+            If the program detects the data type of maxLa is integer, then it treats maxLa as expressed in count.
+            Otherwise, it will be treated as float.
+            Example: maxLa=10 will be treated as integer, while maxLa=10.0 will be treated as float
+        sep : str
+            This variable is used to distinguish items from one another in a transaction. The default separator is the tab space or \t.
+            However, the users can override the default separator.
+        memoryUSS : float
+            To store the total amount of USS memory consumed by the program
+        memoryRSS : float
+            To store the total amount of RSS memory consumed by the program
+        startTime : float
+            To record the start time of the mining process
+        endTime : float
+            To record the completion time of the mining process
+        Database : list
+            To store the transactions of a database in a list
+        mapSupport : Dictionary
+            To maintain the information of items and their frequency
+        lno : int
+            To represent the total no of transactions
+        tree : class
+            To represent the Tree class
+        itemSetCount : int
+            To represent the total no of patterns
+        finalPatterns : dict
+            To store the complete patterns
+
+    :Methods:
+
+        startMine()
+            Mining process will start from here
+        getPatterns()
+            Complete set of patterns will be retrieved with this function
+        save(oFile)
+            Complete set of periodic-frequent patterns will be written into an output file
+        getPatternsAsDataFrame()
+            Complete set of periodic-frequent patterns will be loaded into a dataframe
+        getMemoryUSS()
+            Total amount of USS memory consumed by the mining process will be retrieved from this function
+        getMemoryRSS()
+            Total amount of RSS memory consumed by the mining process will be retrieved from this function
+        getRuntime()
+            Total amount of runtime taken by the mining process will be retrieved from this function
+        creatingItemSets(fileName)
+            Scans the dataset and stores it in a list format
+        PeriodicFrequentOneItem()
+            Extracts the one-length periodic-frequent patterns from the database
+        updateDatabases()
+            Updates the database by removing aperiodic items and sorts each transaction by decreasing item support
+        buildTree()
+            After updating the database, the remaining items will be added into the tree with a null root node
+        convert()
+            To convert the user-specified value
+
+    **Methods to execute code on terminal**
+    ------------------------------------------
+    Format:
+        >>> python3 TSPIN.py <inputFile> <outputFile> <maxPer> <maxLa> <k>
+
+    Example:
+        >>> python3 TSPIN.py sampleTDB.txt patterns.txt 0.3 0.4 0.6
+
+    .. note:: maxPer, maxLa and k will be considered in percentage of database transactions
+
+    **Importing this algorithm into a python program**
+    ----------------------------------------------------
+    .. code-block:: python
+
+        from PAMI.stablePeriodicFrequentPattern.basic import TSPIN as alg
+
+        obj = alg.TSPIN(iFile, maxPer, maxLa, k)
+
+        obj.startMine()
+
+        stablePeriodicFrequentPatterns = obj.getPatterns()
+
+        print("Total number of Stable Periodic Frequent Patterns:", len(stablePeriodicFrequentPatterns))
+
+        obj.save(oFile)
+
+        Df = obj.getPatternsAsDataFrame()
+
+        memUSS = obj.getMemoryUSS()
+
+        print("Total Memory in USS:", memUSS)
+
+        memRSS = obj.getMemoryRSS()
+
+        print("Total Memory in RSS", memRSS)
+
+        run = obj.getRuntime()
+
+        print("Total ExecutionTime in seconds:", run)
+
+    **Credits:**
+    ---------------
+    The complete program was written by P. Likhitha under the supervision of Professor Rage Uday Kiran.
+ + + """ + _startTime = float() + _endTime = float() + _maxLa = str() + _maxPer = float() + _k = float() + _SPPList = {} + _finalPatterns = {} + _iFile = " " + _oFile = " " + _sep = " " + _memoryUSS = float() + _memoryRSS = float() + _Database = [] + _rank = {} + _rankedUp = {} + _lno = 0 + + def _creatingItemSets(self) -> None: + """ + Storing the complete transactions of the database/input file in a database variable + """ + self._Database = [] + if isinstance(self._iFile, _ab._pd.DataFrame): + data, ts = [], [] + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'TS' in i: + ts = self._iFile['TS'].tolist() + if 'Transactions' in i: + data = self._iFile['Transactions'].tolist() + for i in range(len(data)): + tr = [ts[i][0]] + tr = tr + data[i] + self._Database.append(tr) + + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + else: + try: + count = 0 + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + count += 1 + line.strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + except IOError: + print("File Not Found") + quit() + + + def _periodicFrequentOneItem(self) -> Tuple[Dict[str, List[int]], List[str]]: + """ + Calculates the support of each item in the database and assign ranks to the items by decreasing support and returns the frequent items list + + :returns: return the one-length periodic frequent patterns + """ + global _last + tidLast = {} + la = {} + for transaction in self._Database: + ts = int(transaction[0]) + for item in transaction[1:]: + if item not in self._SPPList: + la[item] = max(0, ts - self._maxPer) + self._SPPList[item] = [1, la[item]] + else: + s = self._SPPList[item][0] + 1 + la[item] = max(0, la[item] + ts - tidLast.get(item) - self._maxPer) + self._SPPList[item] = [s, max(la[item], self._SPPList[item][1])] + tidLast[item] = ts + _last = ts + for item in self._SPPList: + la[item] = max(0, la[item] + _last - tidLast[item] - self._maxPer) + self._SPPList[item][1] = max(la[item], self._SPPList[item][1]) + self._SPPList = {k: v for k, v in self._SPPList.items() if v[1] <= self._maxLa} + self._SPPList = {k: v for k, v in sorted(self._SPPList.items(), key=lambda x: (x[1][0]), reverse=True)} + data = self._SPPList + pfList = [k for k, v in data.items()] + self._rank = dict([(index, item) for (item, index) in enumerate(pfList)]) + return data, pfList + + def _updateDatabases(self, dict1: Dict[str, List[int]]) -> List[List[int]]: + """ + Remove the items which are not frequent from database and updates the database with rank of items + + :param dict1: frequent items with support + :type dict1: dictionary + :return: Sorted and updated transactions + """ + list1 = [] + for tr in self._Database: + list2 = [int(tr[0])] + for i in range(1, len(tr)): + if tr[i] in dict1: + list2.append(self._rank[tr[i]]) + if len(list2) >= 2: + basket = list2[1:] + basket.sort() + list2[1:] = basket[0:] + list1.append(list2) + return list1 + + @staticmethod + def _buildTree(data: List[List[int]], info: Dict[int, List[int]]) -> _Tree: + """ + It takes the database and support of each item and construct the main tree by setting root node as a null + + :param data: it represents the one Database in database + :type data: list + 
+        :param info: it represents the support of each item
+        :type info: dictionary
+        :return: returns the root node of the tree
+        """
+
+        rootNode = _Tree()
+        rootNode.info = info.copy()
+        for i in range(len(data)):
+            set1 = [data[i][0]]
+            rootNode.addTransaction(data[i][1:], set1)
+        return rootNode
+
+    def _savePeriodic(self, itemSet: List[str]) -> str:
+        """
+        To convert the ranks of items into their original item names
+
+        :param itemSet: frequent pattern.
+        :return: frequent pattern with original item names
+        """
+        t1 = str()
+        for i in itemSet:
+            t1 = t1 + self._rankedUp[i] + " "
+        return t1
+
+    def _convert(self, value: Union[int, float, str]) -> Union[int, float]:
+        """
+        To convert the given user-specified value
+
+        :param value: user-specified value
+        :return: converted value
+        """
+        if type(value) is int:
+            value = int(value)
+        if type(value) is float:
+            value = (len(self._Database) * value)
+        if type(value) is str:
+            if '.' in value:
+                value = float(value)
+                value = (len(self._Database) * value)
+            else:
+                value = int(value)
+        return value
+
+[docs]
+    def startMine(self) -> None:
+        """
+        Mining process will start from this function
+        """
+
+        global _maxLa, _maxPer, _k, _lno
+        self._startTime = _ab._time.time()
+        if self._iFile is None:
+            raise Exception("Please enter the file path or file name:")
+        if self._maxLa is None:
+            raise Exception("Please enter the maximum lability (maxLa)")
+        self._creatingItemSets()
+        self._maxLa = self._convert(self._maxLa)
+        self._maxPer = self._convert(self._maxPer)
+        self._k = self._convert(self._k)
+        _maxLa, _maxPer, _k, _lno = self._maxLa, self._maxPer, self._k, len(self._Database)
+        if self._maxLa > len(self._Database):
+            raise Exception("maxLa should not exceed the total number of transactions in the database")
+        generatedItems, pfList = self._periodicFrequentOneItem()
+        updatedDatabases = self._updateDatabases(generatedItems)
+        for x, y in self._rank.items():
+            self._rankedUp[y] = x
+        info = {self._rank[k]: v for k, v in generatedItems.items()}
+        Tree = self._buildTree(updatedDatabases, info)
+        patterns = {}
+        Tree.generatePatterns(1, [], patterns)
+        self._finalPatterns = {}
+        for x, y in patterns.items():
+            sample = self._savePeriodic(x)
+            self._finalPatterns[sample] = y
+        self._endTime = _ab._time.time()
+        process = _ab._psutil.Process(_ab._os.getpid())
+        self._memoryUSS = process.memory_full_info().uss
+        self._memoryRSS = process.memory_info().rss
+        print("Top-k stable periodic-frequent patterns were generated successfully using the TSPIN algorithm")
+ + +
+[docs] + def getMemoryUSS(self) -> float: + """Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self) -> float: + """Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self) -> float: + """Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs]
+    def getPatternsAsDataFrame(self) -> _ab._pd.DataFrame:
+        """
+        Storing final periodic-frequent patterns in a dataframe
+
+        :return: returning periodic-frequent patterns in a dataframe
+        :rtype: pd.DataFrame
+        """
+
+        data = []
+        for a, b in self._finalPatterns.items():
+            data.append([a, b[0], b[1]])
+        dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support', 'Periodicity'])
+        return dataFrame
+ + +
+[docs]
+    def save(self, outFile: str) -> None:
+        """
+        Complete set of periodic-frequent patterns will be written into an output file
+
+        :param outFile: name of the output file
+        :type outFile: file
+        """
+        self._oFile = outFile
+        with open(self._oFile, 'w+') as writer:
+            for x, y in self._finalPatterns.items():
+                s1 = x + ":" + str(y[0]) + ":" + str(y[1])
+                writer.write("%s \n" % s1)
+ + +
+[docs] + def getPatterns(self) -> dict: + """ + Function to send the set of periodic-frequent patterns after completion of the mining process + + :return: returning periodic-frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs]
+    def printResults(self) -> None:
+        """
+        This function is used to print the results
+        """
+        print("Total number of Stable Periodic Frequent Patterns:", len(self.getPatterns()))
+        print("Total Memory in USS:", self.getMemoryUSS())
+        print("Total Memory in RSS", self.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_ab._sys.argv) == 6 or len(_ab._sys.argv) == 7:
+        if len(_ab._sys.argv) == 7:
+            _ap = TSPIN(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5], _ab._sys.argv[6])
+        if len(_ab._sys.argv) == 6:
+            _ap = TSPIN(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5])
+        _ap.startMine()
+        _Patterns = _ap.getPatterns()
+        print("Total number of Patterns:", len(_Patterns))
+        _ap.save(_ab._sys.argv[2])
+        _memUSS = _ap.getMemoryUSS()
+        print("Total Memory in USS:", _memUSS)
+        _memRSS = _ap.getMemoryRSS()
+        print("Total Memory in RSS", _memRSS)
+        _run = _ap.getRuntime()
+        print("Total ExecutionTime in seconds:", _run)
+    else:
+        _ap = TSPIN('/Users/Likhitha/Downloads/SPP_sample.txt', 5, 1, 1, ' ')
+        _ap.startMine()
+        print(len(_ap._Database))
+        _Patterns = _ap.getPatterns()
+        for x, y in _Patterns.items():
+            print(x, y)
+        print("Total number of Patterns:", len(_Patterns))
+        _ap.save('/Users/Likhitha/Downloads/output.txt')
+        _memUSS = _ap.getMemoryUSS()
+        print("Total Memory in USS:", _memUSS)
+        _memRSS = _ap.getMemoryRSS()
+        print("Total Memory in RSS", _memRSS)
+        _run = _ap.getRuntime()
+        print("Total ExecutionTime in seconds:", _run)
+        print("Error! The number of input parameters does not match the total number of parameters provided")
\ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/subgraphMining/basic/dfsCode.html b/sphinx/_build/html/_modules/PAMI/subgraphMining/basic/dfsCode.html new file mode 100644 index 000000000..d99769306 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/subgraphMining/basic/dfsCode.html @@ -0,0 +1,221 @@ + PAMI.subgraphMining.basic.dfsCode — PAMI 2024.04.23 documentation

Source code for PAMI.subgraphMining.basic.dfsCode

+import pickle
+
+
+[docs] +class DFSCode: + def __init__(self): + self.rightMost = -1 + self.size = 0 + self.rightMostPath = [] + self.eeList = [] + +
+[docs] + def copy(self): + return pickle.loads(pickle.dumps(self))
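+    # A quick illustrative check (added; not in the original file): the pickle
+    # round-trip above is a concise deep copy, so the clone's eeList can be
+    # mutated independently of the original:
+    #
+    #     >>> a = DFSCode(); b = a.copy()
+    #     >>> b is a or b.eeList is a.eeList
+    #     False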
+ + +
+[docs]
+    def notPreOfRm(self, v):
+        """
+        This function checks whether a given vertex `v` is not the second-to-last
+        element on the `rightMostPath`.
+        """
+        if len(self.rightMostPath) <= 1:
+            return True
+        return v != self.rightMostPath[-2]
+ + +
+[docs] + def getAllVLabels(self): + """ + This function retrieves all vertex labels from the extended edge list and returns them in a list. + """ + labels = [] + vertexMap = {} + for ee in self.eeList: + v1, v1Label = ee.getV1(), ee.getVLabel1() + v2, v2Label = ee.getV2(), ee.getVLabel2() + vertexMap[v1] = v1Label + vertexMap[v2] = v2Label + + count = 0 + while count in vertexMap: + labels.append(vertexMap[count]) + count += 1 + return labels
+ + +
+[docs]
+    def add(self, ee):
+        """
+        The `add` function appends an extended edge to the EE list while updating
+        the rightmost vertex and the rightmost path.
+        """
+        if self.size == 0:
+            self.rightMost = 1
+            self.rightMostPath.extend([0, 1])
+        else:
+            v1, v2 = ee.getV1(), ee.getV2()
+            if v1 < v2:
+                self.rightMost = v2
+                while self.rightMostPath and self.rightMostPath[-1] > v1:
+                    self.rightMostPath.pop()
+                self.rightMostPath.append(v2)
+
+        self.eeList.append(ee)
+        self.size += 1
+ + +
+[docs] + def getAt(self, index): + return self.eeList[index]
+ + +
+[docs] + def onRightMostPath(self, v): + return v in self.rightMostPath
+ + +
+[docs] + def containEdge(self, v1, v2): + for ee in self.eeList: + if (ee.getV1() == v1 and ee.getV2() == v2) or (ee.getV1() == v2 and ee.getV2() == v1): + return True + return False
+ + +
+[docs] + def isEmpty(self): + return not self.eeList
+ + +
+[docs] + def getRightMost(self): + return self.rightMost
+ + +
+[docs] + def getRightMostPath(self): + return self.rightMostPath
+ + +
+[docs] + def getEeList(self): + return self.eeList
+ + + def __str__(self): + return "DFSCode: " + " ".join(str(ee) for ee in self.eeList)
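+# A hypothetical usage sketch (added; not in the original file), assuming the
+# sibling ExtendedEdge class rendered later in this diff; the labels are arbitrary.
+def _dfsCodeDemo():
+    from PAMI.subgraphMining.basic.extendedEdge import ExtendedEdge
+    code = DFSCode()
+    code.add(ExtendedEdge(0, 1, 10, 11, 0))   # forward edge 0 -> 1
+    code.add(ExtendedEdge(1, 2, 11, 12, 0))   # forward edge 1 -> 2
+    assert code.getRightMost() == 2
+    assert code.getRightMostPath() == [0, 1, 2]
+    assert code.getAllVLabels() == [10, 11, 12]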
\ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/subgraphMining/basic/edge.html b/sphinx/_build/html/_modules/PAMI/subgraphMining/basic/edge.html new file mode 100644 index 000000000..2616d55d7 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/subgraphMining/basic/edge.html @@ -0,0 +1,136 @@ + PAMI.subgraphMining.basic.edge — PAMI 2024.04.23 documentation

Source code for PAMI.subgraphMining.basic.edge

+
+[docs] +class Edge: + def __init__(self, v1, v2, edgeLabel): + self.v1 = v1 + self.v2 = v2 + self.edgeLabel = edgeLabel + self.hashcode = (v1 + 1) * 100 + (v2 + 1) * 10 + edgeLabel + +
+[docs] + def another(self, v): + return self.v2 if v == self.v1 else self.v1
+ + +
+[docs] + def getEdgeLabel(self): + return self.edgeLabel
+ + + def __hash__(self): + return self.hashcode + + def __eq__(self, other): + if not isinstance(other, Edge): + return False + return (self.hashcode == other.hashcode and + self.v1 == other.v1 and + self.v2 == other.v2 and + self.edgeLabel == other.edgeLabel) + + def __repr__(self): + return f"Edge(v1: {self.v1}, v2: {self.v2}, Label: {self.edgeLabel})"
\ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/subgraphMining/basic/extendedEdge.html b/sphinx/_build/html/_modules/PAMI/subgraphMining/basic/extendedEdge.html new file mode 100644 index 000000000..6b49ed35a --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/subgraphMining/basic/extendedEdge.html @@ -0,0 +1,210 @@ + PAMI.subgraphMining.basic.extendedEdge — PAMI 2024.04.23 documentation

Source code for PAMI.subgraphMining.basic.extendedEdge

+
+[docs] +class ExtendedEdge: + def __init__(self, v1, v2, vLabel1, vLabel2, edgeLabel): + self.v1 = v1 + self.v2 = v2 + self.vLabel1 = vLabel1 + self.vLabel2 = vLabel2 + self.edgeLabel = edgeLabel + self.hashcode = (1 + v1) * 100 + (1 + v2) * 50 + (1 + vLabel1) * 30 + (1 + vLabel2) * 20 + (1 + edgeLabel) + +
+[docs] + def smallerThan(self, that): + if that is None: + return True + + x1, x2, y1, y2 = self.v1, self.v2, that.v1, that.v2 + + if self.pairSmallerThan(x1, x2, y1, y2): + return True + elif x1 == y1 and x2 == y2: + return (self.vLabel1 < that.vLabel1 or + (self.vLabel1 == that.vLabel1 and self.vLabel2 < that.vLabel2) or + (self.vLabel1 == that.vLabel1 and self.vLabel2 == that.vLabel2 and + self.edgeLabel < that.edgeLabel)) + else: + return False
+ + + +
+[docs] + def smallerThanOriginal(self, that): + if that is None: + return True + + x1, x2, y1, y2 = self.v1, self.v2, that.v1, that.v2 + + if self.pairSmallerThan(x1, x2, y1, y2): + return True + elif x1 == y1 and x2 == y2: + return (self.vLabel1 < that.vLabel1 or + (self.vLabel1 == that.vLabel1 and self.edgeLabel < that.edgeLabel) or + (self.vLabel1 == that.vLabel1 and self.edgeLabel == that.edgeLabel and + self.vLabel2 < that.vLabel2)) + else: + return False
+ + +
+[docs] + def pairSmallerThan(self, x1, x2, y1, y2): + xForward = x1 < x2 + yForward = y1 < y2 + + if xForward and yForward: + return x2 < y2 or (x2 == y2 and x1 > y1) + elif not xForward and not yForward: + return x1 < y1 or (x1 == y1 and x2 < y2) + elif xForward: + return x2 <= y1 + else: + return x1 < y2
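+    # Illustrative trace (added for clarity; not in the original file): for two
+    # forward edges the one with the smaller far endpoint comes first in DFS-code
+    # order, e.g. ExtendedEdge(0, 1, 5, 5, 0).smallerThan(ExtendedEdge(1, 2, 5, 5, 0))
+    # is True because pairSmallerThan(0, 1, 1, 2) reduces to 1 < 2, while the
+    # reverse comparison is False.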
+ + + def __hash__(self): + return self.hashcode + + def __eq__(self, other): + if not isinstance(other, ExtendedEdge): + return False + return (self.v1 == other.v1 and self.v2 == other.v2 and + self.vLabel1 == other.vLabel1 and self.vLabel2 == other.vLabel2 and + self.edgeLabel == other.edgeLabel) + + def __repr__(self): + return f"<{self.v1},{self.v2},{self.vLabel1},{self.vLabel2},{self.edgeLabel}>" + +
+[docs] + def getV1(self): + return self.v1
+ + +
+[docs] + def getV2(self): + return self.v2
+ + +
+[docs] + def getVLabel1(self): + return self.vLabel1
+ + +
+[docs] + def getVLabel2(self): + return self.vLabel2
+ + +
+[docs] + def getEdgeLabel(self): + return self.edgeLabel
\ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/subgraphMining/basic/frequentSubgraph.html b/sphinx/_build/html/_modules/PAMI/subgraphMining/basic/frequentSubgraph.html new file mode 100644 index 000000000..e99feef2c --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/subgraphMining/basic/frequentSubgraph.html @@ -0,0 +1,124 @@ + PAMI.subgraphMining.basic.frequentSubgraph — PAMI 2024.04.23 documentation

Source code for PAMI.subgraphMining.basic.frequentSubgraph

+
+[docs] +class FrequentSubgraph: + def __init__(self, dfsCode, setOfGraphsIds, support): + self.dfsCode = dfsCode + self.setOfGraphsIds = setOfGraphsIds + self.support = support + + def __eq__(self, other): + if not isinstance(other, FrequentSubgraph): + return NotImplemented + return self.support == other.support + + def __lt__(self, other): + if not isinstance(other, FrequentSubgraph): + return NotImplemented + return self.support < other.support + + def __gt__(self, other): + if not isinstance(other, FrequentSubgraph): + return NotImplemented + return self.support > other.support
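+# An illustrative check (added; not in the original file): instances order by
+# support alone, so frequent subgraphs can be sorted or heap-ordered directly.
+def _frequentSubgraphOrderingDemo():
+    a = FrequentSubgraph(dfsCode=None, setOfGraphsIds={0, 1}, support=2)
+    b = FrequentSubgraph(dfsCode=None, setOfGraphsIds={0, 1, 2}, support=3)
+    assert (a < b) and not (a > b) and not (a == b)
+    assert sorted([b, a])[0] is a     # ascending by support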
\ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/subgraphMining/basic/graph.html b/sphinx/_build/html/_modules/PAMI/subgraphMining/basic/graph.html new file mode 100644 index 000000000..d14674e64 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/subgraphMining/basic/graph.html @@ -0,0 +1,301 @@ + PAMI.subgraphMining.basic.graph — PAMI 2024.04.23 documentation

Source code for PAMI.subgraphMining.basic.graph

+from .edge import Edge
+from .vertex import Vertex
+
+
+[docs] +class Graph: + emptyVertexList = [] + emptyIntegerArray = [] + + def __init__(self, id, vMap=None, dfsCode=None): + """ + The `__init__` function initializes a graph object with optional parameters for vertex mapping and + DFS code. + """ + self.vMap = {} + self.id = id + if vMap is not None: + self.vMap = vMap + elif dfsCode is not None: + for ee in dfsCode.getEeList(): + v1, v2, v1Label, v2Label, eLabel = ee.v1, ee.v2, ee.vLabel1, ee.vLabel2, ee.edgeLabel + + e = Edge(v1, v2, eLabel) + if v1 not in self.vMap: + self.vMap[v1] = Vertex(v1, v1Label) + if v2 not in self.vMap: + self.vMap[v2] = Vertex(v2, v2Label) + + self.vMap[v1].addEdge(e) + self.vMap[v2].addEdge(e) + + self.id = -1 + + self.vertices = [] + self.neighborCache = {} + self.mapLabelToVertexIds = {} + self.edgeCount = 0 + + self.precalculateVertexList() + self.precalculateVertexNeighbors() + self.precalculateLabelsToVertices() + +
+[docs] + def getId(self): + return self.id
+ + +
+[docs] + def removeInfrequentLabel(self, label): + """ + The function removes vertices with a specific label from the graph and updates the edges accordingly. + """ + toRemove = [key for key, vertex in self.vMap.items() if vertex.getLabel() == label] + for key in toRemove: + del self.vMap[key] + + for vertex in self.vMap.values(): + edgesToRemove = [edge for edge in vertex.getEdgeList() + if edge.v1 not in self.vMap or edge.v2 not in self.vMap] + + for edge in edgesToRemove: + vertex.getEdgeList().remove(edge)
+ + +
+[docs] + def precalculateVertexNeighbors(self): + """ + The function precalculates the neighbors of each vertex in a graph and stores them in a cache. + """ + self.neighborCache = {} + self.edgeCount = 0 + + for vertexId, vertex in self.vMap.items(): + neighbors = [] + + for edge in vertex.getEdgeList(): + neighborVertex = self.vMap[edge.another(vertexId)] + neighbors.append(neighborVertex) + + neighbors.sort(key=lambda x: x.id) + + self.neighborCache[vertexId] = neighbors + self.edgeCount += len(neighbors) + + self.edgeCount //= 2
+ + +
+[docs] + def precalculateVertexList(self): + """ + The function precalculateVertexList creates a list of vertices by iterating through a dictionary of + vertices. + """ + self.vertices = [] + for _, vertex in self.vMap.items(): + self.vertices.append(vertex)
+ + +
+[docs] + def precalculateLabelsToVertices(self): + """ + This function precalculates and stores mappings of vertex labels to their corresponding vertex IDs. + """ + self.mapLabelToVertexIds = {} + for vertex in self.vertices: + label = vertex.getLabel() + if label not in self.mapLabelToVertexIds: + sameIds = [v.getId() for v in self.vertices if v.getLabel() == label] + self.mapLabelToVertexIds[label] = sameIds
+ + +
+[docs] + def findAllWithLabel(self, targetLabel): + if targetLabel in self.mapLabelToVertexIds: + return self.mapLabelToVertexIds[targetLabel] + else: + return []
+ + +
+[docs] + def getAllNeighbors(self, v): + try: + neighbors = self.neighborCache[v] + except KeyError: + neighbors = [] + return neighbors
+ + +
+[docs] + def getVLabel(self, v): + return self.vMap[v].getLabel()
+ + +
+[docs] + def getEdgeLabel(self, v1, v2): + for e in self.vMap.get(v1).getEdgeList(): + if e.v1 == v1 and e.v2 == v2: + return e.getEdgeLabel() + elif e.v1 == v2 and e.v2 == v1: + return e.getEdgeLabel() + return -1
+ + +
+[docs] + def getEdge(self, v1, v2): + for e in self.vMap.get(v1).getEdgeList(): + if e.v1 == v1 and e.v2 == v2: + return e + elif e.v1 == v2 and e.v2 == v1: + return e + return None
+ + +
+[docs] + def getNonPrecalculatedAllVertices(self): + return list(self.vMap.values())
+ + +
+[docs] + def isNeighboring(self, v1, v2): + neighborsOfV1 = self.neighborCache.get(v1, []) + low = 0 + high = len(neighborsOfV1) - 1 + + while high >= low: + middle = (low + high) // 2 + val = neighborsOfV1[middle].id + if val == v2: + return True + if val < v2: + low = middle + 1 + if val > v2: + high = middle - 1 + return False
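+    # Note (added for clarity; not in the original file): this binary search is
+    # only correct because precalculateVertexNeighbors() sorts each cached
+    # neighbor list by vertex id. If neighborCache were ever populated unsorted,
+    # isNeighboring could miss existing edges.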
+ + +
+[docs] + def getAllVertices(self): + return self.vertices
+ + +
+[docs] + def getEdgeCount(self): + return self.edgeCount
\ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/subgraphMining/basic/gspan.html b/sphinx/_build/html/_modules/PAMI/subgraphMining/basic/gspan.html new file mode 100644 index 000000000..25237cf01 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/subgraphMining/basic/gspan.html @@ -0,0 +1,806 @@ + PAMI.subgraphMining.basic.gspan — PAMI 2024.04.23 documentation

Source code for PAMI.subgraphMining.basic.gspan

+# gSpan is a subgraph mining algorithm that uses DFS and DFS codes to mine subgraphs
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#             from PAMI.subgraphMining.basic import gspan as alg
+#
+#             obj = alg.GSpan(iFile, minSupport)
+#
+#             obj.startMine()
+#
+#             frequentGraphs = obj.getFrequentSubgraphs()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             obj.save(oFile)
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+from PAMI.subgraphMining.basic import abstract as _ab
+
+
+[docs] +class GSpan(_ab._gSpan): + + eliminate_infrequent_vertices = True + eliminate_infrequent_vertex_pairs = True + eliminate_infrequent_edge_labels = True + edge_count_pruning = True + + def __init__(self, iFile, minSupport, outputSingleVertices=True, maxNumberOfEdges=float('inf'), outputGraphIds=False) -> None: + """ + Initialize variables + """ + + self.minSup = minSupport + self.frequentSubgraphs = [] + self._runtime = 0 + self.maxMem = 0 + self.graphCount = 0 + self.patternCount = 0 + self.frequentVertexLabels = [] + self.infrequentVerticesRemovedCount = 0 + self.infrequentVertexPairsRemoved = 0 + self.edgeRemovedByLabel = 0 + self.eliminatedWithMaxSize = 0 + self.emptyGraphsRemoved = 0 + self.pruneByEdgeCount = 0 + self.inPath = iFile + self.outPath = None + self.outputSingleVertices = outputSingleVertices + self.maxNumberOfEdges = maxNumberOfEdges + self.outputGraphIds = outputGraphIds + self._memoryUSS = float() + self._memoryRSS = float() + + +
+[docs] + def startMine(self): + + if self.maxNumberOfEdges <= 0: + return + + self.frequentSubgraphs = [] + + self.patternCount = 0 + + # Record the start time + t1 = _ab.time.time() + + # Read graphs + graphDb = self.readGraphs(self.inPath) + + # Calculate minimum support as a number of graphs + self.minSup = _ab.math.ceil(self.minSup * len(graphDb)) + + # Mining + self.gSpan(graphDb, self.outputSingleVertices) + + # Output + # self.writeResultToFile(self.outPath) + + t2 = _ab.time.time() + + #Calculate runtime + self._runtime = (t2 - t1) + + process = _ab._psutil.Process(_ab._os.getpid()) + + self._memoryUSS = float() + + self._memoryRSS = float() + + self._memoryUSS = process.memory_full_info().uss + + self._memoryRSS = process.memory_info().rss + + self.patternCount = len(self.frequentSubgraphs)
+ + + +
+[docs]
+    def save(self, oFile):
+        """
+        The `save` function writes information about the frequent subgraphs to a
+        specified output file.
+
+        :param oFile: the path of the output file. The method iterates over each
+        frequent subgraph in `self.frequentSubgraphs` and writes the subgraph information to the file
+        """
+        with open(oFile, 'w') as bw:
+            i = 0
+            for subgraph in self.frequentSubgraphs:
+                sb = []
+
+                dfsCode = subgraph.dfsCode
+                sb.append(f"t # {i} * {subgraph.support}\n")
+                if dfsCode.size == 1:
+                    ee = dfsCode.getEeList()[0]
+                    sb.append(f"v 0 {ee.vLabel1}\n")
+                    if ee.edgeLabel != -1:
+                        sb.append(f"v 1 {ee.vLabel2}\n")
+                        sb.append(f"e 0 1 {ee.edgeLabel}\n")
+                else:
+                    vLabels = dfsCode.getAllVLabels()
+                    for j, vLabel in enumerate(vLabels):
+                        sb.append(f"v {j} {vLabel}\n")
+                    for ee in dfsCode.getEeList():
+                        sb.append(f"e {ee.v1} {ee.v2} {ee.edgeLabel}\n")
+
+                if self.outputGraphIds:
+                    sb.append("x " + " ".join(str(id) for id in subgraph.setOfGraphsIds))
+
+                sb.append("\n\n")
+                bw.write("".join(sb))
+                i += 1
+ + + +
+[docs]
+    def readGraphs(self, path):
+        """
+        The `readGraphs` function reads graph data from a file and constructs a list of graphs with
+        vertices and edges.
+
+        :param path: the file path to the text file containing the graph data to be read and
+        processed. The method parses the records in this file ("t", "v" and "e" lines) to build
+        the graphs.
+        :return: a list of `Graph` objects, one per graph record in the file, each containing
+        its vertices and edges.
+        """
+        with open(path, 'r') as br:
+            graphDatabase = []
+            vMap = {}
+            gId = None
+
+            for line in br:
+                line = line.strip()
+
+                if line.startswith("t"):
+                    if vMap:  # If vMap is not empty, it means a graph was read
+                        graphDatabase.append(_ab.Graph(gId, vMap))
+                        vMap = {}  # Reset for the next graph
+
+                    gId = int(line.split(" ")[2])
+
+                elif line.startswith("v"):
+                    items = line.split(" ")
+                    vId = int(items[1])
+                    vLabel = int(items[2])
+                    vMap[vId] = _ab.Vertex(vId, vLabel)
+
+                elif line.startswith("e"):
+                    items = line.split(" ")
+                    v1 = int(items[1])
+                    v2 = int(items[2])
+                    eLabel = int(items[3])
+                    e = _ab.Edge(v1, v2, eLabel)
+                    vMap[v1].addEdge(e)
+                    vMap[v2].addEdge(e)
+
+            if vMap:
+                graphDatabase.append(_ab.Graph(gId, vMap))
+
+        self.graphCount = len(graphDatabase)
+        return graphDatabase
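+    # For reference (added; inferred from the parser above, not part of the
+    # original file), the expected input is plain text with one record per line:
+    #
+    #     t # 0        (start of the graph with id 0)
+    #     v 0 10       (vertex 0 carries label 10)
+    #     v 1 11
+    #     e 0 1 2      (edge between vertices 0 and 1 with label 2)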
+ + + +
+[docs] + def subgraphIsomorphisms(self, c: _ab.DFSCode, g: _ab.Graph): + """ + The function `subgraphIsomorphisms` takes a DFS code and a graph as input, and finds all subgraph + isomorphisms between the DFS code and the graph. + + :param c: The parameter `c` in the `subgraphIsomorphisms` function is of type `_ab.DFSCode`, which + seems to represent a Depth-First Search code. + :param g: The parameter `g` in the `subgraphIsomorphisms` function represents a graph object. The + function is trying to find subgraph isomorphisms between a given DFS code `c` and the graph `g`. It + iterates through the vertices of the graph starting with a specific + :return: The function `subgraphIsomorphisms` returns a list of dictionaries, where each dictionary + represents a subgraph isomorphism mapping between the input DFS code `c` and the input graph `g`. + Each dictionary in the list maps vertex IDs from the DFS code to corresponding vertex IDs in the + graph, indicating a valid subgraph isomorphism. + """ + isoms = [] + startLabel = c.getEeList()[0].getVLabel1() + + # Find all vertices in the graph that match the start label and initialize isomorphisms with them + for vId in g.findAllWithLabel(startLabel): + hMap = {} + hMap[0] = vId + isoms.append(hMap) + + # For each edge in the DFS code, try to extend each partial isomorphism + for ee in c.getEeList(): + v1, v2, v2Label, eLabel = ee.getV1(), ee.getV2(), ee.getVLabel2(), ee.getEdgeLabel() + updateIsoms = [] + # Try to extend each current isomorphism with the current edge + for iso in isoms: + mappedV1 = iso.get(v1) + # Forward edge + if v1 < v2: + mappedVertices = list(iso.values()) + for mappedV2 in g.getAllNeighbors(mappedV1): + if (v2Label == mappedV2.getLabel() and + mappedV2.getId() not in mappedVertices and + eLabel == g.getEdgeLabel(mappedV1, mappedV2.getId())): + + tempM = iso.copy() + tempM[v2] = mappedV2.getId() + + updateIsoms.append(tempM) + + # Backward edge + else: + mappedV2 = iso.get(v2) + # Check if the backward edge exists in the graph matching the DFS code edge + if g.isNeighboring(mappedV1, mappedV2) and eLabel == g.getEdgeLabel(mappedV1, mappedV2): + updateIsoms.append(iso) + + isoms = updateIsoms + return isoms
+ + + +
+[docs] + def rightMostPathExtensionsFromSingle(self, c: _ab.DFSCode, g: _ab.Graph): + """ + The function `rightMostPathExtensionsFromSingle` generates extensions for a given DFS code and + graph, focusing on the rightmost path. + + :param c: The parameter `c` is of type `_ab.DFSCode`, which seems to represent a Depth-First Search + code. It is used in the `rightMostPathExtensionsFromSingle` method to perform operations related to + DFS codes + :param g: The parameter `g` in the provided code snippet represents a graph object. It seems to be + an instance of a graph data structure that contains vertices and edges. The code is designed to + find and return extensions from a given DFS code `c` based on the provided graph `g`. The function + ` + :return: The function `rightMostPathExtensionsFromSingle` returns a dictionary `extensions` + containing extended edges as keys and sets of graph IDs as values. + """ + # Get the unique identifier for the given graph + gid = g.getId() + # Initialize a dictionary to store potential extensions + extensions = {} + + # If the DFS code is empty, consider all edges of the graph for extension + if c.isEmpty(): + for vertex in g.vertices: + for e in vertex.getEdgeList(): + # Determine the order of vertex labels to maintain consistency + v1Label = g.getVLabel(e.v1) + v2Label = g.getVLabel(e.v2) + if v1Label < v2Label: + ee1 = _ab.ExtendedEdge(0, 1, v1Label, v2Label, e.getEdgeLabel()) + else: + ee1 = _ab.ExtendedEdge(0, 1, v2Label, v1Label, e.getEdgeLabel()) + + # Update the extensions dictionary with new or existing extended edges + setOfGraphIds = extensions.get(ee1, set()) + setOfGraphIds.add(gid) + extensions[ee1] = setOfGraphIds + else: + # For non-empty DFS code, focus on extending from the rightmost path + rightMost = c.getRightMost() + isoms = self.subgraphIsomorphisms(c, g) + + # Iterate through all isomorphisms to find valid extensions + for isom in isoms: + invertedIsom = {v: k for k, v in isom.items()} + mappedRm = isom[rightMost] + mappedRmLabel = g.getVLabel(mappedRm) + for x in g.getAllNeighbors(mappedRm): + invertedX = invertedIsom.get(x.getId()) + if invertedX is not None and c.onRightMostPath(invertedX) and c.notPreOfRm(invertedX) and not c.containEdge(rightMost, invertedX): + ee = _ab.ExtendedEdge(rightMost, invertedX, mappedRmLabel, x.getLabel(), g.getEdgeLabel(mappedRm, x.getId())) + extensions.setdefault(ee, set()).add(gid) + + mappedVertices = set(isom.values()) + for v in c.getRightMostPath(): + mappedV = isom[v] + mappedVLabel = g.getVLabel(mappedV) + for x in g.getAllNeighbors(mappedV): + if x.getId() not in mappedVertices: + ee = _ab.ExtendedEdge(v, rightMost + 1, mappedVLabel, x.getLabel(), g.getEdgeLabel(mappedV, x.getId())) + extensions.setdefault(ee, set()).add(gid) + + return extensions
+ + + +
+[docs] + def rightMostPathExtensions(self, c: _ab.DFSCode, graphDb, graphIds): + """ + The function `rightMostPathExtensions` generates extensions for a given DFS code by considering + rightmost paths in a graph database. + + :param c: The parameter `c` in the `rightMostPathExtensions` method is of type `_ab.DFSCode`. It + seems to represent a Depth-First Search code used in graph algorithms. The method is responsible + for generating extensions based on the rightmost path in a graph + :param graphDb: The `graphDb` parameter in the `rightMostPathExtensions` method is a + database that stores graph data. It is used to retrieve graph objects based on + their IDs, which are provided in the `graphIds` parameter. The method then performs operations on + these graph objects to generate + :param graphIds: The `graphIds` parameter in the `rightMostPathExtensions` function represents a + list of graph identifiers. These identifiers are used to retrieve specific graphs from the + `graphDb` database in order to perform operations on them within the function. Each ID in the + `graphIds` list corresponds to an identifier. + :return: The function `rightMostPathExtensions` returns a dictionary `extensions` containing + extended edges as keys and sets of graph IDs as values. + """ + extensions = {} + if c.isEmpty(): + for id in graphIds: + g = graphDb[id] + # Skip graphs if pruning based on edge count is enabled and applicable + if GSpan.edge_count_pruning and c.size >= g.getEdgeCount(): + self.pruneByEdgeCount += 1 + continue + for v in g.vertices: + for e in v.getEdgeList(): + # Organize the vertex labels to maintain consistent ordering + v1L = g.getVLabel(e.v1) + v2L = g.getVLabel(e.v2) + if v1L < v2L: + ee1 = _ab.ExtendedEdge(0, 1, v1L, v2L, e.getEdgeLabel()) + else: + ee1 = _ab.ExtendedEdge(0, 1, v2L, v1L, e.getEdgeLabel()) + + # Add the new or existing extensions to the dictionary + setOfGraphIds = extensions.get(ee1, set()) + setOfGraphIds.add(id) + extensions[ee1] = setOfGraphIds + else: + # For non-empty DFS codes, extend based on the rightmost path of each graph + rightMost = c.getRightMost() + for id in graphIds: + g = graphDb[id] + if GSpan.edge_count_pruning and c.size >= g.getEdgeCount(): + self.pruneByEdgeCount += 1 + continue + isoms = self.subgraphIsomorphisms(c, g) + for isom in isoms: + invertedIsom = {} + for key, value in isom.items(): + invertedIsom[value] = key + mappedRM = isom.get(rightMost) + mappedRMLabel = g.getVLabel(mappedRM) + for x in g.getAllNeighbors(mappedRM): + invertedX = invertedIsom.get(x.getId()) + if invertedX is not None and c.onRightMostPath(invertedX) and \ + c.notPreOfRm(invertedX) and not c.containEdge(rightMost, invertedX): + + ee = _ab.ExtendedEdge(rightMost, invertedX, mappedRMLabel, x.getLabel(), + g.getEdgeLabel(mappedRM, x.getId())) + + if ee not in extensions: + extensions[ee] = set() + extensions[ee].add(g.getId()) + + mappedVertices = isom.values() + for v in c.getRightMostPath(): + mappedV = isom[v] + mappedVLabel = g.getVLabel(mappedV) + for x in g.getAllNeighbors(mappedV): + if x.getId() not in mappedVertices: + ee = _ab.ExtendedEdge(v, rightMost + 1, mappedVLabel, x.getLabel(), + g.getEdgeLabel(mappedV, x.getId())) + + if ee not in extensions: + extensions[ee] = set() + extensions[ee].add(g.getId()) + return extensions
+ + + + +
+[docs]
+    def gspanDFS(self, c: _ab.DFSCode, graphDb, subgraphId):
+        """
+        The `gspanDFS` function recursively explores graph patterns using the gSpan algorithm to find
+        frequent subgraphs in a graph database.
+
+        :param c: the `_ab.DFSCode` instance representing the DFS code currently being extended
+        :type c: _ab.DFSCode
+        :param graphDb: the graph database that the algorithm is operating on
+        :param subgraphId: the set of identifiers of the graphs in `graphDb` that contain the
+        current DFS code
+        :return: None; the method modifies `self.frequentSubgraphs` by appending the frequent
+        subgraphs found during the DFS traversal.
+        """
+
+        if c.size == self.maxNumberOfEdges - 1:
+            return
+        extensions = self.rightMostPathExtensions(c, graphDb, subgraphId)
+
+        for extension, newGraphIds in extensions.items():
+            sup = len(newGraphIds)
+
+            if (sup >= self.minSup):
+                newC = c.copy()
+                newC.add(extension)
+
+                if (self.isCanonical(newC)):
+                    subgraph = _ab.FrequentSubgraph(newC, newGraphIds, sup)
+                    self.frequentSubgraphs.append(subgraph)
+
+                    self.gspanDFS(newC, graphDb, newGraphIds)
+ + + +
+[docs]
+    def isCanonical(self, c: _ab.DFSCode):
+        """
+        The function `isCanonical` checks whether a given DFS code is canonical by comparing it with
+        its rightmost path extensions.
+
+        :param c: the `_ab.DFSCode` instance to be tested
+        :type c: _ab.DFSCode
+        :return: a boolean value: True if the input DFSCode `c` is canonical, and False otherwise.
+        """
+        canC = _ab.DFSCode()
+        for i in range(c.size):
+            extensions = self.rightMostPathExtensionsFromSingle(canC, _ab.Graph(-1, dfsCode=c))
+            minEe = None
+            for ee in extensions.keys():
+                if minEe is None or ee.smallerThan(minEe):
+                    minEe = ee
+
+            if minEe is not None and minEe.smallerThan(c.getAt(i)):
+                return False
+
+            if minEe is not None:
+                canC.add(minEe)
+        return True
+ + + +
+[docs] + def gSpan(self, graphDb, outputFrequentVertices): + """ + The gSpan function in Python processes a graph database by precalculating vertex lists, removing + infrequent vertex pairs, and performing a depth-first search algorithm. + + :param graphDb: The `graphDb` parameter refers to a graph database that the algorithm is + operating on. + :param outputFrequentVertices: The `outputFrequentVertices` parameter is a boolean flag that + determines whether the frequent vertices should be output or not. + """ + if outputFrequentVertices or GSpan.eliminate_infrequent_vertices: + self.findAllOnlyOneVertex(graphDb, outputFrequentVertices) + + for g in graphDb: + g.precalculateVertexList() + + if GSpan.eliminate_infrequent_vertex_pairs or GSpan.eliminate_infrequent_edge_labels: + self.removeInfrequentVertexPairs(graphDb) + + graphIds = set() + for i, g in enumerate(graphDb): + if g.vertices is not None and len(g.vertices) != 0: + if self.infrequentVerticesRemovedCount > 0: + g.precalculateVertexList() + + graphIds.add(i) + g.precalculateVertexNeighbors() + g.precalculateLabelsToVertices() + else: + self.emptyGraphsRemoved += 1 + + if len(self.frequentVertexLabels) != 0: + self.gspanDFS(_ab.DFSCode(), graphDb, graphIds)
+ + + +
+[docs] + class Pair: + def __init__(self, x, y): + if x < y: + self.x = x + self.y = y + else: + self.x = y + self.y = x + + def __eq__(self, other): + if isinstance(other, GSpan.Pair): + return self.x == other.x and self.y == other.y + return False + + def __hash__(self): + return self.x + 100 * self.y
+ + + +
+[docs] + def findAllOnlyOneVertex(self, graphDb, outputFrequentVertices): + """ + The function `findAllOnlyOneVertex` iterates through a graph database to find frequent vertices + based on a minimum support threshold, storing the results and optionally removing infrequent + vertices. + + :param graphDb: The `graphDb` parameter refers to a graph database that the algorithm is + operating on. + :param outputFrequentVertices: The `outputFrequentVertices` parameter is a boolean flag that + determines whether the frequent vertices should be included in the output or not. + """ + self.frequentVertexLabels = [] + labelM = {} + for g in graphDb: + for v in g.getNonPrecalculatedAllVertices(): + if v.getEdgeList(): + vLabel = v.getLabel() + labelM.setdefault(vLabel, set()).add(g.getId()) + # Check each label for frequency against the minimum support threshold + for label, tempSupG in labelM.items(): + sup = len(tempSupG) + if sup >= self.minSup: + self.frequentVertexLabels.append(label) + if outputFrequentVertices: + tempD = _ab.DFSCode() + tempD.add(_ab.ExtendedEdge(0, 0, label, label, -1)) + self.frequentSubgraphs.append(_ab.FrequentSubgraph(tempD, tempSupG, sup)) + elif GSpan.eliminate_infrequent_vertices: + for graphId in tempSupG: + g = graphDb[graphId] + g.removeInfrequentLabel(label) + self.infrequentVerticesRemovedCount += 1
+ + + +
+[docs]
+    def removeInfrequentVertexPairs(self, graphDb):
+        """
+        The function `removeInfrequentVertexPairs` processes a graph database by removing infrequent
+        vertex pairs and edge labels based on the specified support thresholds.
+
+        :param graphDb: the graph database that the algorithm is operating on
+        """
+        if GSpan.eliminate_infrequent_vertex_pairs:
+            matrix = _ab.SparseTriangularMatrix()
+            alreadySeenPair = set()  # To avoid double counting pairs in the same graph
+
+        if GSpan.eliminate_infrequent_edge_labels:
+            mapEdgeLabelToSupport = {}
+            alreadySeenEdgeLabel = set()  # To avoid double counting edge labels in the same graph
+
+        for g in graphDb:
+            vertices = g.getAllVertices()
+
+            # Check each vertex and its edges for infrequent pairs and labels
+            for v1 in vertices:
+                labelV1 = v1.getLabel()
+
+                for edge in v1.getEdgeList():
+                    v2 = edge.another(v1.getId())
+                    labelV2 = g.getVLabel(v2)
+
+                    # Track vertex label pairs for infrequency analysis
+                    if GSpan.eliminate_infrequent_vertex_pairs:
+                        pair = self.Pair(labelV1, labelV2)
+                        if pair not in alreadySeenPair:
+                            matrix.incrementCount(labelV1, labelV2)
+                            alreadySeenPair.add(pair)
+
+                    # Track edge labels for infrequency analysis
+                    if GSpan.eliminate_infrequent_edge_labels:
+                        edgeLabel = edge.getEdgeLabel()
+                        if edgeLabel not in alreadySeenEdgeLabel:
+                            alreadySeenEdgeLabel.add(edgeLabel)
+                            edgeSupport = mapEdgeLabelToSupport.get(edgeLabel, 0)
+                            mapEdgeLabelToSupport[edgeLabel] = edgeSupport + 1
+
+            if GSpan.eliminate_infrequent_vertex_pairs:
+                alreadySeenPair.clear()
+            if GSpan.eliminate_infrequent_edge_labels:
+                alreadySeenEdgeLabel.clear()
+
+        if GSpan.eliminate_infrequent_vertex_pairs:
+            matrix.removeInfrequentEntriesFromMatrix(self.minSup)
+
+        if GSpan.eliminate_infrequent_vertex_pairs or GSpan.eliminate_infrequent_edge_labels:
+            for g in graphDb:
+                vertices = g.getAllVertices()
+
+                for v1 in vertices:
+                    iterEdges = iter(v1.getEdgeList())
+                    for edge in iterEdges:
+                        v2 = edge.another(v1.getId())
+                        labelV2 = g.getVLabel(v2)
+                        count = matrix.getSupportForItems(v1.getLabel(), labelV2)
+
+                        # Remove edges based on infrequency criteria
+                        if GSpan.eliminate_infrequent_vertex_pairs and count < self.minSup:
+                            v1.removeEdge(edge)
+                            self.infrequentVertexPairsRemoved += 1
+
+                        elif GSpan.eliminate_infrequent_edge_labels and \
+                                mapEdgeLabelToSupport.get(edge.getEdgeLabel(), 0) < self.minSup:
+                            v1.removeEdge(edge)
+                            self.edgeRemovedByLabel += 1
+ + + +
+[docs] + def getMemoryRSS(self): + return self._memoryRSS
+ + +
+[docs] + def getMemoryUSS(self): + return self._memoryUSS
+ + +
+[docs] + def getRuntime(self): + return self._runtime
+ + +
+[docs] + def getFrequentSubgraphs(self): + sb = [] + for i, subgraph in enumerate(self.frequentSubgraphs): + dfsCode = subgraph.dfsCode + subgraphDescription = [f"t # {i} * {subgraph.support}"] + + if dfsCode.size == 1: + ee = dfsCode.getEeList()[0] + subgraphDescription.append(f"v 0 {ee.vLabel1}") + if ee.edgeLabel != -1: + subgraphDescription.append(f"v 1 {ee.vLabel2}") + subgraphDescription.append(f"e 0 1 {ee.edgeLabel}") + else: + vLabels = dfsCode.getAllVLabels() + for j, vLabel in enumerate(vLabels): + subgraphDescription.append(f"v {j} {vLabel}") + for ee in dfsCode.getEeList(): + subgraphDescription.append(f"e {ee.v1} {ee.v2} {ee.edgeLabel}") + + sb.append('\n'.join(subgraphDescription)) + return '\n'.join(sb)
\ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/subgraphMining/basic/sparseTriangularMatrix.html b/sphinx/_build/html/_modules/PAMI/subgraphMining/basic/sparseTriangularMatrix.html new file mode 100644 index 000000000..d8764a3f5 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/subgraphMining/basic/sparseTriangularMatrix.html @@ -0,0 +1,165 @@ + PAMI.subgraphMining.basic.sparseTriangularMatrix — PAMI 2024.04.23 documentation

Source code for PAMI.subgraphMining.basic.sparseTriangularMatrix

+# The `SparseTriangularMatrix` class represents a matrix with sparse triangular structure and provides
+# methods for incrementing counts, getting support for items, setting support values, and removing
+# infrequent entries based on a minimum support threshold.
+
+[docs] +class SparseTriangularMatrix: + def __init__(self): + self.matrix = {} + + def __str__(self): + temp = [] + for key in sorted(self.matrix.keys()): + subkeys = self.matrix[key] + subkeyStr = " ".join(f"{subkey}:{count}" for subkey, count in subkeys.items()) + temp.append(f"{key}: {subkeyStr}\n") + return "".join(temp) + + +
+[docs] + def incrementCount(self, i, j): + if i < j: + key, subkey = i, j + else: + key, subkey = j, i + + if key not in self.matrix: + self.matrix[key] = {subkey: 1} + else: + if subkey not in self.matrix[key]: + self.matrix[key][subkey] = 1 + else: + self.matrix[key][subkey] += 1
+ + +
+[docs] + def getSupportForItems(self, i, j): + smaller, larger = min(i, j), max(i, j) + return self.matrix.get(smaller, {}).get(larger, 0)
+ + +
+[docs] + def setSupport(self, i, j, support): + smaller, larger = min(i, j), max(i, j) + + if smaller not in self.matrix: + self.matrix[smaller] = {larger: support} + else: + self.matrix[smaller][larger] = support
+ + +
+[docs] + def removeInfrequentEntriesFromMatrix(self, minsup): + for key in list(self.matrix.keys()): + for subkey, value in list(self.matrix[key].items()): + if value < minsup: + del self.matrix[key][subkey] + if not self.matrix[key]: + del self.matrix[key]
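+# A small usage sketch (added for illustration; not part of the original file).
+# Counts are stored once per unordered pair, so (i, j) and (j, i) share a cell.
+def _sparseMatrixDemo():
+    m = SparseTriangularMatrix()
+    m.incrementCount(3, 1)
+    m.incrementCount(1, 3)
+    assert m.getSupportForItems(3, 1) == 2       # both orders hit the same cell
+    m.removeInfrequentEntriesFromMatrix(minsup=3)
+    assert m.getSupportForItems(1, 3) == 0       # pruned: 2 < minsup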
\ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/subgraphMining/basic/vertex.html b/sphinx/_build/html/_modules/PAMI/subgraphMining/basic/vertex.html new file mode 100644 index 000000000..fe99a53da --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/subgraphMining/basic/vertex.html @@ -0,0 +1,152 @@ + PAMI.subgraphMining.basic.vertex — PAMI 2024.04.23 documentation

Source code for PAMI.subgraphMining.basic.vertex

+
+[docs] +class Vertex: + def __init__(self, id, vLabel): + self.id = id + self.vLabel = vLabel + self.eList = [] + +
+[docs] + def addEdge(self, edge): + self.eList.append(edge)
+ + +
+[docs] + def getId(self): + return self.id
+ + +
+[docs] + def getLabel(self): + return self.vLabel
+ + +
+[docs] + def getEdgeList(self): + return self.eList
+ + + def __eq__(self, other): + if not isinstance(other, Vertex): + return False + return self.id == other.id + + def __lt__(self, other): + if not isinstance(other, Vertex): + return NotImplemented + return self.id < other.id + + def __repr__(self): + return f"Vertex(ID: {self.id}, Label: {self.vLabel})" + +
+[docs] + def removeEdge(self, edgeToRemove): + self.eList = [edge for edge in self.eList if edge != edgeToRemove]
\ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/subgraphMining/topK/DFSCode.html b/sphinx/_build/html/_modules/PAMI/subgraphMining/topK/DFSCode.html new file mode 100644 index 000000000..db273ba6e --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/subgraphMining/topK/DFSCode.html @@ -0,0 +1,210 @@ + PAMI.subgraphMining.topK.DFSCode — PAMI 2024.04.23 documentation

Source code for PAMI.subgraphMining.topK.DFSCode

+import pickle
+
+
+[docs] +class DfsCode: + def __init__(self): + self.rightMost = -1 + self.size = 0 + self.rightMostPath = [] + self.eeList = [] + +
+[docs] + def copy(self): + return pickle.loads(pickle.dumps(self))
+ + +
+[docs] + def notPreOfRm(self, v): + if len(self.rightMostPath) <= 1: + return True + return v != self.rightMostPath[-2]
+ + +
+[docs] + def getAllVLabels(self): + labels = [] + vertexMap = {} + for ee in self.eeList: + v1, v1Label = ee.getV1(), ee.getVLabel1() + v2, v2Label = ee.getV2(), ee.getVLabel2() + vertexMap[v1] = v1Label + vertexMap[v2] = v2Label + + count = 0 + while count in vertexMap: + labels.append(vertexMap[count]) + count += 1 + return labels
+ + +
+[docs] + def add(self, ee): + if self.size == 0: + self.rightMost = 1 + self.rightMostPath.extend([0, 1]) + else: + v1, v2 = ee.getV1(), ee.getV2() + if v1 < v2: + self.rightMost = v2 + while self.rightMostPath and self.rightMostPath[-1] > v1: + self.rightMostPath.pop() + self.rightMostPath.append(v2) + + self.eeList.append(ee) + self.size += 1
+ + +
+[docs] + def getAt(self, index): + return self.eeList[index]
+ + +
+[docs] + def onRightMostPath(self, v): + return v in self.rightMostPath
+ + +
+[docs] + def containEdge(self, v1, v2): + for ee in self.eeList: + if (ee.getV1() == v1 and ee.getV2() == v2) or (ee.getV1() == v2 and ee.getV2() == v1): + return True + return False
+ + +
+[docs] + def isEmpty(self): + return not self.eeList
+ + +
+[docs] + def getRightMost(self): + return self.rightMost
+ + +
+[docs] + def getRightMostPath(self): + return self.rightMostPath
+ + +
+[docs] + def getEeList(self): + return self.eeList
+ + + def __str__(self): + return "DfsCode: " + " ".join(str(ee) for ee in self.eeList)
\ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/subgraphMining/topK/DFSThread.html b/sphinx/_build/html/_modules/PAMI/subgraphMining/topK/DFSThread.html new file mode 100644 index 000000000..dce49a80f --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/subgraphMining/topK/DFSThread.html @@ -0,0 +1,123 @@ + PAMI.subgraphMining.topK.DFSThread — PAMI 2024.04.23 documentation

Source code for PAMI.subgraphMining.topK.DFSThread

+import threading
+
+
+[docs] +class DfsThread(threading.Thread): + def __init__(self, graphDb, candidates, minSup, tkgInstance): + threading.Thread.__init__(self) + self.graphDb = graphDb + self.candidates = candidates + self.minSup = minSup + self.tkgInstance = tkgInstance + +
+[docs] + def run(self): + while not self.candidates.empty(): + _, candidate = self.candidates.get() + if len(candidate.setOfGraphsIds) < self.minSup: + break + self.tkgInstance.gspanDynamicDFS(candidate.dfsCode, self.graphDb, candidate.setOfGraphsIds)
+
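+
+# Illustrative sketch (not part of the original module): DfsThread workers
+# drain a shared PriorityQueue whose entries are (-support, candidate)
+# tuples, so higher-support candidates are expanded first. The candidate
+# and TKG stand-ins below are hypothetical.
+import queue
+from types import SimpleNamespace
+
+_candidates = queue.PriorityQueue()
+_candidates.put((-2, SimpleNamespace(setOfGraphsIds={0, 1}, dfsCode=None)))
+_tkg = SimpleNamespace(gspanDynamicDFS=lambda code, db, ids: print("expanding", ids))
+
+_worker = DfsThread(graphDb=[], candidates=_candidates, minSup=1, tkgInstance=_tkg)
+_worker.start()
+_worker.join()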
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/subgraphMining/topK/edge.html b/sphinx/_build/html/_modules/PAMI/subgraphMining/topK/edge.html new file mode 100644 index 000000000..40969ee75 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/subgraphMining/topK/edge.html @@ -0,0 +1,136 @@ + + + + + + PAMI.subgraphMining.topK.edge — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +

Source code for PAMI.subgraphMining.topK.edge

+
+[docs] +class Edge: + def __init__(self, v1, v2, edgeLabel): + self.v1 = v1 + self.v2 = v2 + self.edgeLabel = edgeLabel + self.hashCode = (v1 + 1) * 100 + (v2 + 1) * 10 + edgeLabel + +
+[docs] + def another(self, v): + return self.v2 if v == self.v1 else self.v1
+ + +
+[docs] + def getEdgeLabel(self): + return self.edgeLabel
+ + + def __hash__(self): + return self.hashCode + + def __eq__(self, other): + if not isinstance(other, Edge): + return False + return (self.hashCode == other.hashCode and + self.v1 == other.v1 and + self.v2 == other.v2 and + self.edgeLabel == other.edgeLabel) + + def __repr__(self): + return f"Edge(v1: {self.v1}, v2: {self.v2}, Label: {self.edgeLabel})"
+ +
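+
+# Illustrative sketch (not part of the original module): another() returns
+# the opposite endpoint of an edge, which is how Graph walks from a vertex
+# to its neighbours.
+_e = Edge(0, 1, 3)
+print(_e.another(0))        # 1
+print(_e.another(1))        # 0
+print(_e.getEdgeLabel())    # 3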
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/subgraphMining/topK/extendedEdge.html b/sphinx/_build/html/_modules/PAMI/subgraphMining/topK/extendedEdge.html new file mode 100644 index 000000000..12a7178c6 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/subgraphMining/topK/extendedEdge.html @@ -0,0 +1,209 @@ + + + + + + PAMI.subgraphMining.topK.extendedEdge — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +

Source code for PAMI.subgraphMining.topK.extendedEdge

+
+[docs] +class ExtendedEdge: + def __init__(self, v1, v2, vLabel1, vLabel2, edgeLabel): + self.v1 = v1 + self.v2 = v2 + self.vLabel1 = vLabel1 + self.vLabel2 = vLabel2 + self.edgeLabel = edgeLabel + self.hashCode = (1 + v1) * 100 + (1 + v2) * 50 + (1 + vLabel1) * 30 + (1 + vLabel2) * 20 + (1 + edgeLabel) + +
+[docs] + def smallerThan(self, that): + if that is None: + return True + + x1, x2, y1, y2 = self.v1, self.v2, that.v1, that.v2 + + if self.pairSmallerThan(x1, x2, y1, y2): + return True + elif x1 == y1 and x2 == y2: + return (self.vLabel1 < that.vLabel1 or + (self.vLabel1 == that.vLabel1 and self.vLabel2 < that.vLabel2) or + (self.vLabel1 == that.vLabel1 and self.vLabel2 == that.vLabel2 and + self.edgeLabel < that.edgeLabel)) + else: + return False
+ + +
+[docs] + def smallerThanOriginal(self, that): + if that is None: + return True + + x1, x2, y1, y2 = self.v1, self.v2, that.v1, that.v2 + + if self.pairSmallerThan(x1, x2, y1, y2): + return True + elif x1 == y1 and x2 == y2: + return (self.vLabel1 < that.vLabel1 or + (self.vLabel1 == that.vLabel1 and self.edgeLabel < that.edgeLabel) or + (self.vLabel1 == that.vLabel1 and self.edgeLabel == that.edgeLabel and + self.vLabel2 < that.vLabel2)) + else: + return False
+ + +
+[docs] + def pairSmallerThan(self, x1, x2, y1, y2): + xForward = x1 < x2 + yForward = y1 < y2 + + if xForward and yForward: + return x2 < y2 or (x2 == y2 and x1 > y1) + elif not xForward and not yForward: + return x1 < y1 or (x1 == y1 and x2 < y2) + elif xForward: + return x2 <= y1 + else: + return x1 < y2
+ + + def __hash__(self): + return self.hashCode + + def __eq__(self, other): + if not isinstance(other, ExtendedEdge): + return False + return (self.v1 == other.v1 and self.v2 == other.v2 and + self.vLabel1 == other.vLabel1 and self.vLabel2 == other.vLabel2 and + self.edgeLabel == other.edgeLabel) + + def __repr__(self): + return f"<{self.v1},{self.v2},{self.vLabel1},{self.vLabel2},{self.edgeLabel}>" + +
+[docs] + def getV1(self): + return self.v1
+ + +
+[docs] + def getV2(self): + return self.v2
+ + +
+[docs] + def getVLabel1(self): + return self.vLabel1
+ + +
+[docs] + def getVLabel2(self): + return self.vLabel2
+ + +
+[docs] + def getEdgeLabel(self): + return self.edgeLabel
+
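+
+# Illustrative sketch (not part of the original module): smallerThan()
+# implements the gSpan DFS-code edge order, so the forward edge (0, 1)
+# precedes the forward edge (1, 2).
+_e1 = ExtendedEdge(0, 1, 5, 6, 0)
+_e2 = ExtendedEdge(1, 2, 6, 7, 0)
+print(_e1.smallerThan(_e2))   # True
+print(_e2.smallerThan(_e1))   # False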
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/subgraphMining/topK/frequentSubgraph.html b/sphinx/_build/html/_modules/PAMI/subgraphMining/topK/frequentSubgraph.html new file mode 100644 index 000000000..ccaeba1d1 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/subgraphMining/topK/frequentSubgraph.html @@ -0,0 +1,124 @@ + + + + + + PAMI.subgraphMining.topK.frequentSubgraph — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +

Source code for PAMI.subgraphMining.topK.frequentSubgraph

+
+[docs] +class FrequentSubgraph: + def __init__(self, dfsCode, setOfGraphsIds, support): + self.dfsCode = dfsCode + self.setOfGraphsIds = setOfGraphsIds + self.support = support + + def __eq__(self, other): + if not isinstance(other, FrequentSubgraph): + return NotImplemented + return self.support == other.support + + def __lt__(self, other): + if not isinstance(other, FrequentSubgraph): + return NotImplemented + return self.support < other.support + + def __gt__(self, other): + if not isinstance(other, FrequentSubgraph): + return NotImplemented + return self.support > other.support
+ +
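+
+# Illustrative sketch (not part of the original module): the comparison
+# operators order patterns by support, which is what lets TKG keep the
+# current top-k patterns in a min-oriented PriorityQueue.
+import queue
+
+_a = FrequentSubgraph(dfsCode=None, setOfGraphsIds={0}, support=3)
+_b = FrequentSubgraph(dfsCode=None, setOfGraphsIds={0, 1}, support=5)
+_topK = queue.PriorityQueue()
+_topK.put(_a)
+_topK.put(_b)
+print(_topK.get().support)   # 3 -- the lowest-support pattern leaves first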
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/subgraphMining/topK/graph.html b/sphinx/_build/html/_modules/PAMI/subgraphMining/topK/graph.html new file mode 100644 index 000000000..68a65e563 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/subgraphMining/topK/graph.html @@ -0,0 +1,285 @@ + + + + + + PAMI.subgraphMining.topK.graph — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +

Source code for PAMI.subgraphMining.topK.graph

+from .edge import Edge
+from .vertex import Vertex
+
+
+[docs] +class Graph: + EMPTY_VERTEX_LIST = [] + EMPTY_INTEGER_ARRAY = [] + + def __init__(self, id, vMap=None, dfsCode=None): + self.vMap = {} + self.id = id + if vMap is not None: + self.vMap = vMap + elif dfsCode is not None: + for ee in dfsCode.getEeList(): + v1, v2, v1Label, v2Label, eLabel = ee.getV1(), ee.getV2(), ee.getVLabel1(), ee.getVLabel2(), ee.getEdgeLabel() + + e = Edge(v1, v2, eLabel) + if v1 not in self.vMap: + self.vMap[v1] = Vertex(v1, v1Label) + if v2 not in self.vMap: + self.vMap[v2] = Vertex(v2, v2Label) + + self.vMap[v1].addEdge(e) + self.vMap[v2].addEdge(e) + + self.id = -1 + + self.vertices = [] + self.neighborCache = {} + self.mapLabelToVertexIds = {} + self.edgeCount = 0 + + self.precalculateVertexList() + self.precalculateVertexNeighbors() + self.precalculateLabelsToVertices() + +
+[docs] + def getId(self): + return self.id
+ + +
+[docs] + def removeInfrequentLabel(self, label): + toRemove = [key for key, vertex in self.vMap.items() if vertex.getLabel() == label] + for key in toRemove: + del self.vMap[key] + + for vertex in self.vMap.values(): + edgesToRemove = [edge for edge in vertex.getEdgeList() + if edge.getV1() not in self.vMap or edge.getV2() not in self.vMap] + + for edge in edgesToRemove: + vertex.getEdgeList().remove(edge)
+ + +
+[docs] + def precalculateVertexNeighbors(self): + self.neighborCache = {} + self.edgeCount = 0 + + for vertexId, vertex in self.vMap.items(): + neighbors = [] + + for edge in vertex.getEdgeList(): + neighborVertex = self.vMap[edge.another(vertexId)] + neighbors.append(neighborVertex) + + neighbors.sort(key=lambda x: x.id) + + self.neighborCache[vertexId] = neighbors + self.edgeCount += len(neighbors) + + self.edgeCount //= 2
+ + +
+[docs] + def precalculateVertexList(self): + self.vertices = [] + for _, vertex in self.vMap.items(): + self.vertices.append(vertex)
+ + +
+[docs] + def precalculateLabelsToVertices(self): + self.mapLabelToVertexIds = {} + for vertex in self.vertices: + label = vertex.getLabel() + if label not in self.mapLabelToVertexIds: + sameIds = [v.getId() for v in self.vertices if v.getLabel() == label] + self.mapLabelToVertexIds[label] = sameIds
+ + +
+[docs] + def findAllWithLabel(self, targetLabel): + if targetLabel in self.mapLabelToVertexIds: + return self.mapLabelToVertexIds[targetLabel] + else: + return []
+ + +
+[docs] + def getAllNeighbors(self, v): + try: + neighbors = self.neighborCache[v] + except KeyError: + neighbors = [] + return neighbors
+ + +
+[docs] + def getVLabel(self, v): + return self.vMap[v].getLabel()
+ + +
+[docs] + def getEdgeLabel(self, v1, v2): + for e in self.vMap.get(v1).getEdgeList(): + if e.v1 == v1 and e.v2 == v2: + return e.getEdgeLabel() + elif e.v1 == v2 and e.v2 == v1: + return e.getEdgeLabel() + return -1
+ + + +
+[docs] + def getEdge(self, v1, v2): + for e in self.vMap.get(v1).getEdgeList(): + if e.v1 == v1 and e.v2 == v2: + return e + elif e.v1 == v2 and e.v2 == v1: + return e + return None
+ + +
+[docs] + def getNonPrecalculatedAllVertices(self): + return list(self.vMap.values())
+ + +
+[docs] + def isNeighboring(self, v1, v2): + neighborsOfV1 = self.neighborCache.get(v1, []) + low = 0 + high = len(neighborsOfV1) - 1 + + while high >= low: + middle = (low + high) // 2 + val = neighborsOfV1[middle].id + if val == v2: + return True + if val < v2: + low = middle + 1 + if val > v2: + high = middle - 1 + return False
+ + +
+[docs] + def getAllVertices(self): + return self.vertices
+ + +
+[docs] + def getEdgeCount(self): + return self.edgeCount
+
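+
+# Illustrative sketch (not part of the original module): build a two-vertex
+# graph directly from a vMap and query the structures precalculated by the
+# constructor.
+_v0, _v1 = Vertex(0, 7), Vertex(1, 8)
+_e = Edge(0, 1, 0)
+_v0.addEdge(_e)
+_v1.addEdge(_e)
+_g = Graph(0, vMap={0: _v0, 1: _v1})
+print(_g.getEdgeCount())        # 1 (each edge is stored at both endpoints, then halved)
+print(_g.isNeighboring(0, 1))   # True (binary search over the sorted neighbour cache)
+print(_g.findAllWithLabel(8))   # [1]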
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/subgraphMining/topK/sparseTriangularMatrix.html b/sphinx/_build/html/_modules/PAMI/subgraphMining/topK/sparseTriangularMatrix.html new file mode 100644 index 000000000..aedfc0181 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/subgraphMining/topK/sparseTriangularMatrix.html @@ -0,0 +1,161 @@ + + + + + + PAMI.subgraphMining.topK.sparseTriangularMatrix — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +

Source code for PAMI.subgraphMining.topK.sparseTriangularMatrix

+
+[docs] +class SparseTriangularMatrix: + def __init__(self): + self.matrix = {} + + def __str__(self): + temp = [] + for key in sorted(self.matrix.keys()): + subkeys = self.matrix[key] + subkeyStr = " ".join(f"{subkey}:{count}" for subkey, count in subkeys.items()) + temp.append(f"{key}: {subkeyStr}\n") + return "".join(temp) + +
+[docs] + def incrementCount(self, i, j): + if i < j: + key, subkey = i, j + else: + key, subkey = j, i + + if key not in self.matrix: + self.matrix[key] = {subkey: 1} + else: + if subkey not in self.matrix[key]: + self.matrix[key][subkey] = 1 + else: + self.matrix[key][subkey] += 1
+ + +
+[docs] + def getSupportForItems(self, i, j): + smaller, larger = min(i, j), max(i, j) + return self.matrix.get(smaller, {}).get(larger, 0)
+ + +
+[docs] + def setSupport(self, i, j, support): + smaller, larger = min(i, j), max(i, j) + + if smaller not in self.matrix: + self.matrix[smaller] = {larger: support} + else: + self.matrix[smaller][larger] = support
+ + +
+[docs] + def removeInfrequentEntriesFromMatrix(self, minsup): + for key in list(self.matrix.keys()): + for subkey, value in list(self.matrix[key].items()): + if value < minsup: + del self.matrix[key][subkey] + if not self.matrix[key]: + del self.matrix[key]
+
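+
+# Illustrative sketch (not part of the original module): counts are stored
+# once per unordered pair, so (3, 1) and (1, 3) update the same entry.
+_m = SparseTriangularMatrix()
+_m.incrementCount(3, 1)
+_m.incrementCount(1, 3)
+print(_m.getSupportForItems(3, 1))   # 2
+_m.removeInfrequentEntriesFromMatrix(3)
+print(_m.getSupportForItems(1, 3))   # 0 -- the pair fell below minsup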
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/subgraphMining/topK/tkg.html b/sphinx/_build/html/_modules/PAMI/subgraphMining/topK/tkg.html new file mode 100644 index 000000000..7c47d1894 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/subgraphMining/topK/tkg.html @@ -0,0 +1,724 @@ + + + + + + PAMI.subgraphMining.topK.tkg — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +

Source code for PAMI.subgraphMining.topK.tkg

+# from PAMI.subgraphMining.topK import tkg as alg
+
+# obj = alg.TKG(iFile, k)
+
+# obj.startMine()
+
+# frequentGraphs = obj.getKSubgraphs()
+
+# memUSS = obj.getMemoryUSS()
+
+# obj.save(oFile)
+
+# print("Total Memory in USS:", memUSS)
+
+# memRSS = obj.getMemoryRSS()
+
+# print("Total Memory in RSS", memRSS)
+
+# run = obj.getRuntime()
+
+# print("Total ExecutionTime in seconds:", run)
+
+# minSup = obj.getMinSupport()
+
+# print("Minimum support:", minSup)
+
+
+from PAMI.subgraphMining.topK import abstract as _ab
+
+
+
+[docs] +class TKG(_ab._TKG): + ELIMINATE_INFREQUENT_VERTICES = True + ELIMINATE_INFREQUENT_VERTEX_PAIRS = True + ELIMINATE_INFREQUENT_EDGE_LABELS = True + EDGE_COUNT_PRUNING = True + DYNAMIC_SEARCH = True + THREADED_DYNAMIC_SEARCH = True + + def __init__(self, iFile, k, maxNumberOfEdges=float('inf'), outputSingleVertices=True, outputGraphIds=False): + self.iFile = iFile + self.k = k + self.outputGraphIds = outputGraphIds + self.outputSingleVertices = outputSingleVertices + self.maxNumberOfEdges = maxNumberOfEdges + self.frequentSubgraphs = [] + self.graphCount = 0 + self.patternCount = 0 + self.frequentVertexLabels = [] + self.infrequentVerticesRemovedCount = 0 + self.infrequentVertexPairsRemovedCount = 0 + self.skipStrategyCount = 0 + self.threadCount = 1 + self.edgeRemovedByLabel = 0 + self.eliminatedWithMaxSize = 0 + self.emptyGraphsRemoved = 0 + self.pruneByEdgeCount = 0 + + +
+[docs]
+    def startMine(self):
+        """
+        Start the mining process on the graph database and record the
+        runtime, pattern count, and memory-usage statistics.
+        """
+        if self.maxNumberOfEdges <= 0:
+            return
+
+        self.kSubgraphs = _ab.PriorityQueue()
+        self.candidates = _ab.PriorityQueue()
+
+        t1 = _ab.time.time()
+        graphDb = self.readGraphs(self.iFile)
+        self.minSup = 1
+
+        self.gSpan(graphDb, self.outputSingleVertices)
+
+        self.runtime = _ab.time.time() - t1
+        self.patternCount = self.getQueueSize(self.kSubgraphs)
+
+        process = _ab._psutil.Process(_ab._os.getpid())
+        self._memoryUSS = process.memory_full_info().uss
+        self._memoryRSS = process.memory_info().rss
+ + + + +
+[docs] + def readGraphs(self, path): + """ + The `readGraphs` function reads graph data from a file and constructs a list of graphs with vertices + and edges. + + :param path: This method reads the graph data from the specified file and constructs a list of graphs + represented by vertices and edges + :return: The `readGraphs` method returns a list of `_ab.Graph` objects, which represent graphs read + from the file. + """ + with open(path, 'r') as br: + graphDatabase = [] + vMap = {} + gId = None + + for line in br: + line = line.strip() + if line.startswith("t"): + if vMap: + graphDatabase.append(_ab.Graph(gId, vMap)) + vMap = {} + gId = int(line.split()[2]) + elif line.startswith("v"): + items = line.split() + vId, vLabel = int(items[1]), int(items[2]) + vMap[vId] = _ab.Vertex(vId, vLabel) + elif line.startswith("e"): + items = line.split() + v1, v2, eLabel = int(items[1]), int(items[2]), int(items[3]) + edge = _ab.Edge(v1, v2, eLabel) + vMap[v1].addEdge(edge) + vMap[v2].addEdge(edge) + + if vMap: + graphDatabase.append(_ab.Graph(gId, vMap)) + + self.graphCount = len(graphDatabase) + return graphDatabase
+ + +
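+    # Based on the parser above, the input file is expected to contain one
+    # "t # <graphId>" header per graph followed by its vertex ("v <id> <label>")
+    # and edge ("e <v1> <v2> <label>") lines, for example:
+    #
+    #   t # 0
+    #   v 0 10
+    #   v 1 11
+    #   e 0 1 2
+    #   t # 1
+    #   v 0 10
+    #   v 1 12
+    #   e 0 1 2
+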
+[docs] + def save(self, oFile): + """ + The `save` function writes subgraph information to a file in a specific format. + + :param oFile: The `oFile` parameter in the `save` method is the file path where the output will be + saved. This method writes the subgraphs information to the specified file in a specific format + """ + subgraphsList = self.getSubgraphsList() + + with open(oFile, 'w') as bw: + for i, subgraph in enumerate(subgraphsList): + sb = [] + dfsCode = subgraph.dfsCode + + sb.append(f"t # {i} * {subgraph.support}\n") + if len(dfsCode.eeList) == 1: + ee = dfsCode.eeList[0] + sb.append(f"v 0 {ee.vLabel1}\n") + if ee.edgeLabel != -1: + sb.append(f"v 1 {ee.vLabel2}\n") + sb.append(f"e 0 1 {ee.edgeLabel}\n") + else: + vLabels = dfsCode.getAllVLabels() + for j, vLabel in enumerate(vLabels): + sb.append(f"v {j} {vLabel}\n") + for ee in dfsCode.eeList: + sb.append(f"e {ee.v1} {ee.v2} {ee.edgeLabel}\n") + + if self.outputGraphIds: + sb.append("x " + " ".join(str(id) for id in subgraph.setOfGraphsIds)) + sb.append("\n\n") + bw.write("".join(sb))
+ + + +
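+    # Given the writer above, a single-edge pattern with support 5 that
+    # occurs in graphs 0 and 3 would be saved as (the "x" line appears only
+    # when outputGraphIds is True):
+    #
+    #   t # 0 * 5
+    #   v 0 10
+    #   v 1 11
+    #   e 0 1 2
+    #   x 0 3
+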
+[docs] + def savePattern(self, subgraph): + # previousMinSup = self.minSup + + self.kSubgraphs.put(subgraph) + if self.kSubgraphs.qsize() > self.k: + while self.kSubgraphs.qsize() > self.k: + lower = self.kSubgraphs.get() + + if lower.support > self.minSup: + self.minSup = lower.support
+ + + +
+[docs] + def getQueueSize(self, queue): + size = 0 + tempQueue = _ab.PriorityQueue() + + while not queue.empty(): + item = queue.get() + tempQueue.put(item) + size += 1 + + while not tempQueue.empty(): + queue.put(tempQueue.get()) + + return size
+ + +
+[docs] + def subgraphIsomorphisms(self, c, g): + isoms = [] + startLabel = c.getEeList()[0].vLabel1 + for vId in g.findAllWithLabel(startLabel): + isoms.append({0: vId}) + + for ee in c.getEeList(): + v1, v2, v2Label, eLabel = ee.v1, ee.v2, ee.vLabel2, ee.edgeLabel + updateIsoms = [] + for iso in isoms: + mappedV1 = iso[v1] + if v1 < v2: + mappedVertices = set(iso.values()) + for mappedV2 in g.getAllNeighbors(mappedV1): + if v2Label == mappedV2.getLabel() and mappedV2.getId() not in mappedVertices and eLabel == g.getEdgeLabel(mappedV1, mappedV2.getId()): + tempIso = iso.copy() + tempIso[v2] = mappedV2.getId() + updateIsoms.append(tempIso) + else: + mappedV2 = iso[v2] + if g.isNeighboring(mappedV1, mappedV2) and eLabel == g.getEdgeLabel(mappedV1, mappedV2): + updateIsoms.append(iso) + isoms = updateIsoms + return isoms
+ + + + +
+[docs] + def rightMostPathExtensionsFromSingle(self, c, g): + gid = g.getId() + extensions = {} + + if c.isEmpty(): + for vertex in g.vertices: + for e in vertex.getEdgeList(): + v1Label = g.getVLabel(e.v1) + v2Label = g.getVLabel(e.v2) + if v1Label < v2Label: + ee1 = _ab.ExtendedEdge(0, 1, v1Label, v2Label, e.getEdgeLabel()) + else: + ee1 = _ab.ExtendedEdge(0, 1, v2Label, v1Label, e.getEdgeLabel()) + extensions.setdefault(ee1, set()).add(gid) + else: + rightMost = c.getRightMost() + isoms = self.subgraphIsomorphisms(c, g) + for iso in isoms: + invertedIsom = {v: k for k, v in iso.items()} + mappedRm = iso[rightMost] + mappedRmLabel = g.getVLabel(mappedRm) + for x in g.getAllNeighbors(mappedRm): + invertedX = invertedIsom.get(x.getId()) + if invertedX is not None and c.onRightMostPath(invertedX) and not c.containEdge(rightMost, invertedX): + ee = _ab.ExtendedEdge(rightMost, invertedX, mappedRmLabel, x.getLabel(), g.getEdgeLabel(mappedRm, x.getId())) + extensions.setdefault(ee, set()).add(gid) + mappedVertices = set(iso.values()) + for v in c.getRightMostPath(): + mappedV = iso[v] + mappedVLabel = g.getVLabel(mappedV) + for x in g.getAllNeighbors(mappedV): + if x.getId() not in mappedVertices: + ee = _ab.ExtendedEdge(v, rightMost + 1, mappedVLabel, x.getLabel(), g.getEdgeLabel(mappedV, x.getId())) + extensions.setdefault(ee, set()).add(gid) + return extensions
+ + +
+[docs] + def rightMostPathExtensions(self, c, graphDB, graphIds): + extensions = {} + + if c.isEmpty(): + for graphId in graphIds: + g = graphDB[graphId] + if self.EDGE_COUNT_PRUNING and c.size >= g.getEdgeCount(): + self.pruneByEdgeCount += 1 + continue + for vertex in g.vertices: + for e in vertex.getEdgeList(): + v1Label = g.getVLabel(e.v1) + v2Label = g.getVLabel(e.v2) + if v1Label < v2Label: + ee1 = _ab.ExtendedEdge(0, 1, v1Label, v2Label, e.getEdgeLabel()) + else: + ee1 = _ab.ExtendedEdge(0, 1, v2Label, v1Label, e.getEdgeLabel()) + extensions.setdefault(ee1, set()).add(graphId) + else: + rightMost = c.getRightMost() + for graphId in graphIds: + g = graphDB[graphId] + if self.EDGE_COUNT_PRUNING and c.size >= g.getEdgeCount(): + self.pruneByEdgeCount += 1 + continue + isoms = self.subgraphIsomorphisms(c, g) + for isom in isoms: + invertedIsom = {v: k for k, v in isom.items()} + mappedRm = isom[rightMost] + mappedRmLabel = g.getVLabel(mappedRm) + for x in g.getAllNeighbors(mappedRm): + invertedX = invertedIsom.get(x.getId()) + if invertedX is not None and c.onRightMostPath(invertedX) and not c.containEdge(rightMost, invertedX): + ee = _ab.ExtendedEdge(rightMost, invertedX, mappedRmLabel, x.getLabel(), g.getEdgeLabel(mappedRm, x.getId())) + extensions.setdefault(ee, set()).add(g.getId()) + mappedVertices = set(isom.values()) + for v in c.getRightMostPath(): + mappedV = isom[v] + mappedVLabel = g.getVLabel(mappedV) + for x in g.getAllNeighbors(mappedV): + if x.getId() not in mappedVertices: + ee = _ab.ExtendedEdge(v, rightMost + 1, mappedVLabel, x.getLabel(), g.getEdgeLabel(mappedV, x.getId())) + extensions.setdefault(ee, set()).add(g.getId()) + return extensions
+ + + + +
+[docs]
+    def gSpan(self, graphDB, outputFrequentVertices):
+        if outputFrequentVertices or self.ELIMINATE_INFREQUENT_VERTICES:
+            self.findAllOnlyOneVertex(graphDB, outputFrequentVertices)
+
+        for g in graphDB:
+            g.precalculateVertexList()
+
+        if self.ELIMINATE_INFREQUENT_VERTEX_PAIRS or self.ELIMINATE_INFREQUENT_EDGE_LABELS:
+            self.removeInfrequentVertexPairs(graphDB)
+
+        graphIds = set()
+        for i, g in enumerate(graphDB):
+            if g.vertices:
+                if self.infrequentVerticesRemovedCount > 0:
+                    g.precalculateVertexList()
+                graphIds.add(i)
+                g.precalculateVertexNeighbors()
+                g.precalculateLabelsToVertices()
+            else:
+                self.emptyGraphsRemoved += 1
+
+        if not outputFrequentVertices or self.frequentVertexLabels:
+            if self.DYNAMIC_SEARCH:
+                self.gspanDynamicDFS(_ab.DfsCode(), graphDB, graphIds)
+                if self.THREADED_DYNAMIC_SEARCH:
+                    self.startThreads(graphDB, self.candidates, self.minSup)
+                else:
+                    # self.candidates is a PriorityQueue of (-support, subgraph)
+                    # tuples, so it is drained with empty()/get() rather than
+                    # the (non-existent) pop(), and each tuple is unpacked.
+                    while not self.candidates.empty():
+                        _, candidate = self.candidates.get()
+                        if len(candidate.setOfGraphsIds) < self.minSup:
+                            continue
+                        self.gspanDynamicDFS(candidate.dfsCode, graphDB, candidate.setOfGraphsIds)
+            else:
+                self.gspanDfs(_ab.DfsCode(), graphDB, graphIds)
+ + +
+[docs] + def startThreads(self, graphDB, candidates, minSup): + threads = [] + for _ in range(self.threadCount): + thread = _ab.DfsThread(graphDB, candidates, minSup, self) + thread.start() + threads.append(thread) + + for thread in threads: + thread.join()
+ + +
+[docs] + def gspanDfs(self, c: _ab.DfsCode, graphDB, subgraphId): + if c.size == self.maxNumberOfEdges - 1: + return + extensions = self.rightMostPathExtensions(c, graphDB, subgraphId) + for extension, newGraphIds in extensions.items(): + sup = len(newGraphIds) + if sup >= self.minSup: + newC = c.copy() + newC.add(extension) + + if self.isCanonical(newC): + subgraph = _ab.FrequentSubgraph(newC, newGraphIds, sup) + self.frequentSubgraphs.append(subgraph) + self.gspanDfs(newC, graphDB, newGraphIds)
+ + + +
+[docs] + def gspanDynamicDFS(self, c, graphDB, graphIds): + if c.size == self.maxNumberOfEdges - 1: + return + + extensions = self.rightMostPathExtensions(c, graphDB, graphIds) + for extension, newGraphIds in extensions.items(): + support = len(newGraphIds) + + if support >= self.minSup: + newC = c.copy() + newC.add(extension) + if self.isCanonical(newC): + subgraph = _ab.FrequentSubgraph(newC, newGraphIds, support) + self.savePattern(subgraph) + self.registerAsCandidate(subgraph)
+ + +
+[docs] + def registerAsCandidate(self, subgraph): + self.candidates.put((-subgraph.support, subgraph))
+ + + +
+[docs]
+    def isCanonical(self, c: _ab.DfsCode):
+        canC = _ab.DfsCode()
+        # The graph induced by c is fixed, so it is built once; note that the
+        # Graph constructor takes the DFS code through its dfsCode keyword.
+        graphOfC = _ab.Graph(-1, dfsCode=c)
+        for i in range(c.size):
+            extensions = self.rightMostPathExtensionsFromSingle(canC, graphOfC)
+            minEe = None
+            for ee in extensions.keys():
+                if minEe is None or ee.smallerThan(minEe):
+                    minEe = ee
+
+            if minEe is not None and minEe.smallerThan(c.getAt(i)):
+                return False
+
+            if minEe is not None:
+                canC.add(minEe)
+        return True
+ + + +
+[docs] + class Pair: + def __init__(self, x, y): + if x < y: + self.x = x + self.y = y + else: + self.x = y + self.y = x + + def __eq__(self, other): + if isinstance(other, TKG.Pair): + return self.x == other.x and self.y == other.y + return False + + def __hash__(self): + return self.x + 100 * self.y
+ + + +
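+    # Illustrative sketch: Pair normalises the order of its two labels, so
+    # Pair(4, 2) and Pair(2, 4) compare equal and hash identically:
+    #
+    #   p, q = TKG.Pair(4, 2), TKG.Pair(2, 4)
+    #   p == q and hash(p) == hash(q)   # True
+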
+[docs] + def findAllOnlyOneVertex(self, graphDB, outputFrequentVertices): + self.frequentVertexLabels = [] + labelM = {} + for g in graphDB: + for v in g.getNonPrecalculatedAllVertices(): + if v.getEdgeList(): + vLabel = v.getLabel() + labelM.setdefault(vLabel, set()).add(g.getId()) + for label, tempSupG in labelM.items(): + sup = len(tempSupG) + if sup >= self.minSup: + self.frequentVertexLabels.append(label) + if outputFrequentVertices: + tempD = _ab.DfsCode() + tempD.add(_ab.ExtendedEdge(0, 0, label, label, -1)) + self.frequentSubgraphs.append(_ab.FrequentSubgraph(tempD, tempSupG, sup)) + elif TKG.ELIMINATE_INFREQUENT_VERTICES: + for graphId in tempSupG: + g = graphDB[graphId] + g.removeInfrequentLabel(label) + self.infrequentVerticesRemovedCount += 1
+ + +
+[docs]
+    def removeInfrequentVertexPairs(self, graphDB):
+        # The triangular matrix tracks vertex-label pairs, so it is guarded by
+        # ELIMINATE_INFREQUENT_VERTEX_PAIRS (not the edge-label flag).
+        if TKG.ELIMINATE_INFREQUENT_VERTEX_PAIRS:
+            matrix = _ab.SparseTriangularMatrix()
+            alreadySeenPair = set()
+
+        if TKG.ELIMINATE_INFREQUENT_EDGE_LABELS:
+            mapEdgeLabelToSupport = {}
+            alreadySeenEdgeLabel = set()
+
+        for g in graphDB:
+            vertices = g.getAllVertices()
+            for v1 in vertices:
+                labelV1 = v1.getLabel()
+                for edge in v1.getEdgeList():
+                    v2 = edge.another(v1.getId())
+                    labelV2 = g.getVLabel(v2)
+
+                    if TKG.ELIMINATE_INFREQUENT_VERTEX_PAIRS:
+                        pair = self.Pair(labelV1, labelV2)
+                        if pair not in alreadySeenPair:
+                            matrix.incrementCount(labelV1, labelV2)
+                            alreadySeenPair.add(pair)
+
+                    if TKG.ELIMINATE_INFREQUENT_EDGE_LABELS:
+                        edgeLabel = edge.getEdgeLabel()
+                        if edgeLabel not in alreadySeenEdgeLabel:
+                            alreadySeenEdgeLabel.add(edgeLabel)
+                            mapEdgeLabelToSupport[edgeLabel] = mapEdgeLabelToSupport.get(edgeLabel, 0) + 1
+
+            if TKG.ELIMINATE_INFREQUENT_VERTEX_PAIRS:
+                alreadySeenPair.clear()
+            if TKG.ELIMINATE_INFREQUENT_EDGE_LABELS:
+                alreadySeenEdgeLabel.clear()
+
+        if TKG.ELIMINATE_INFREQUENT_VERTEX_PAIRS:
+            matrix.removeInfrequentEntriesFromMatrix(self.minSup)
+
+        if TKG.ELIMINATE_INFREQUENT_VERTEX_PAIRS or TKG.ELIMINATE_INFREQUENT_EDGE_LABELS:
+            for g in graphDB:
+                for v1 in g.getAllVertices():
+                    # Iterate over a copy because removeEdge() rebuilds eList.
+                    for edge in list(v1.getEdgeList()):
+                        v2 = edge.another(v1.getId())
+                        labelV2 = g.getVLabel(v2)
+
+                        if TKG.ELIMINATE_INFREQUENT_VERTEX_PAIRS and \
+                                matrix.getSupportForItems(v1.getLabel(), labelV2) < self.minSup:
+                            v1.removeEdge(edge)
+                            self.infrequentVertexPairsRemovedCount += 1
+                        elif TKG.ELIMINATE_INFREQUENT_EDGE_LABELS and \
+                                mapEdgeLabelToSupport.get(edge.getEdgeLabel(), 0) < self.minSup:
+                            v1.removeEdge(edge)
+                            self.edgeRemovedByLabel += 1
+ + +
+[docs] + def getMemoryRSS(self): + return self._memoryRSS
+ + +
+[docs] + def getMemoryUSS(self): + return self._memoryUSS
+ + +
+[docs] + def getRuntime(self): + return self.runtime
+ + +
+[docs] + def getMinSupport(self): + return self.minSup
+ + +
+[docs] + def getKSubgraphs(self): + subgraphsList = self.getSubgraphsList() + + for i, subgraph in enumerate(subgraphsList): + sb = [] + dfsCode = subgraph.dfsCode + + sb.append(f"t # {i} * {subgraph.support}\n") + if len(dfsCode.eeList) == 1: + ee = dfsCode.eeList[0] + sb.append(f"v 0 {ee.vLabel1}\n") + if ee.edgeLabel != -1: + sb.append(f"v 1 {ee.vLabel2}\n") + sb.append(f"e 0 1 {ee.edgeLabel}\n") + else: + vLabels = dfsCode.getAllVLabels() + for j, vLabel in enumerate(vLabels): + sb.append(f"v {j} {vLabel}\n") + for ee in dfsCode.eeList: + sb.append(f"e {ee.v1} {ee.v2} {ee.edgeLabel}\n") + + if self.outputGraphIds: + sb.append("x " + " ".join(str(id) for id in subgraph.setOfGraphsIds)) + sb.append("\n\n") + print("".join(sb))
+ + + +
+[docs]
+    def getSubgraphsList(self):
+        """Creates a support-sorted copy of the queue's contents without emptying the original queue; save() and getKSubgraphs() rely on this helper."""
+        subgraphsList = list(self.kSubgraphs.queue)
+        subgraphsList.sort(key=lambda sg: sg.support, reverse=True)
+        return subgraphsList
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/subgraphMining/topK/vertex.html b/sphinx/_build/html/_modules/PAMI/subgraphMining/topK/vertex.html new file mode 100644 index 000000000..70a96fb24 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/subgraphMining/topK/vertex.html @@ -0,0 +1,152 @@ + + + + + + PAMI.subgraphMining.topK.vertex — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +

Source code for PAMI.subgraphMining.topK.vertex

+
+[docs] +class Vertex: + def __init__(self, id, vLabel): + self.id = id + self.vLabel = vLabel + self.eList = [] + +
+[docs] + def addEdge(self, edge): + self.eList.append(edge)
+ + +
+[docs] + def getId(self): + return self.id
+ + +
+[docs] + def getLabel(self): + return self.vLabel
+ + +
+[docs] + def getEdgeList(self): + return self.eList
+ + + def __eq__(self, other): + if not isinstance(other, Vertex): + return False + return self.id == other.id + + def __lt__(self, other): + if not isinstance(other, Vertex): + return NotImplemented + return self.id < other.id + + def __repr__(self): + return f"Vertex(ID: {self.id}, Label: {self.vLabel})" + +
+[docs] + def removeEdge(self, edgeToRemove): + self.eList = [edge for edge in self.eList if edge != edgeToRemove]
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/uncertainFaultTolerantFrequentPattern/VBFTMine.html b/sphinx/_build/html/_modules/PAMI/uncertainFaultTolerantFrequentPattern/VBFTMine.html new file mode 100644 index 000000000..73f726a32 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/uncertainFaultTolerantFrequentPattern/VBFTMine.html @@ -0,0 +1,573 @@ + + + + + + PAMI.uncertainFaultTolerantFrequentPattern.VBFTMine — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +

Source code for PAMI.uncertainFaultTolerantFrequentPattern.VBFTMine

+# VBFTMine is one of the fundamental algorithms for discovering fault-tolerant frequent patterns in an uncertain transactional database, based on a bitset representation.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#
+#             import PAMI.uncertainFaultTolerantFrequentPattern.basic.VBFTMine as alg
+#
+#             obj = alg.VBFTMine(iFile, minSup, itemSup, minLength, faultTolerance)
+#
+#             obj.startMine()
+#
+#             faultTolerantFrequentPattern = obj.getPatterns()
+#
+#             print("Total number of Fault Tolerant Frequent Patterns:", len(faultTolerantFrequentPattern))
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+__copyright__ = """
+ Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+"""
+
+import pandas as pd
+from deprecated import deprecated
+
+import numpy as _np
+from PAMI.faultTolerantFrequentPattern.basic import abstract as _ab
+
+
+[docs]
+class VBFTMine(_ab._faultTolerantFrequentPatterns):
+    """
+    :Description: VBFTMine is one of the fundamental algorithms for discovering fault-tolerant frequent patterns in an
+                  uncertain transactional database, based on a bitset representation.
+                  This program employs the apriori property (also called the downward-closure property) to reduce the search space effectively.
+
+    :Reference: Koh, JL., Yo, PW. (2005). An Efficient Approach for Mining Fault-Tolerant Frequent Patterns Based on Bit Vector Representations.
+                In: Zhou, L., Ooi, B.C., Meng, X. (eds) Database Systems for Advanced Applications. DASFAA 2005. Lecture Notes in Computer Science,
+                vol 3453. Springer, Berlin, Heidelberg. https://doi.org/10.1007/11408079_51
+
+    :param iFile: str :
+        Name of the input file to mine the complete set of uncertain fault-tolerant frequent patterns
+    :param oFile: str :
+        Name of the output file to store the complete set of uncertain fault-tolerant frequent patterns
+    :param minSup: float or int or str :
+        The user can specify minSup either as a count or as a proportion of the database size.
+        If the program detects that the data type of minSup is integer, minSup is treated as a count;
+        otherwise, it is treated as a proportion.
+        Example: minSup=10 will be treated as a count, while minSup=10.0 will be treated as a proportion.
+    :param itemSup: int or float :
+        Frequency of an item
+    :param minLength: int :
+        Minimum length of a pattern
+    :param faultTolerance: int :
+        The ability of the pattern-mining algorithm to handle errors or inconsistencies in the data without completely failing or producing incorrect results.
+    :param sep: str :
+        This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator.
+
+    :Attributes:
+
+        startTime : float
+            To record the start time of the mining process
+
+        endTime : float
+            To record the completion time of the mining process
+
+        finalPatterns : dict
+            Storing the complete set of patterns in a dictionary variable
+
+        memoryUSS : float
+            To store the total amount of USS memory consumed by the program
+
+        memoryRSS : float
+            To store the total amount of RSS memory consumed by the program
+
+        Database : list
+            To store the transactions of a database in a list
+
+
+    **Executing the code on terminal**:
+    ------------------------------------
+    .. code-block:: console
+
+      Format:
+
+      (.venv) $ python3 VBFTMine.py <inputFile> <outputFile> <minSup> <itemSup> <minLength> <faultTolerance>
+
+      Example usage:
+
+      (.venv) $ python3 VBFTMine.py sampleDB.txt patterns.txt 10.0 3.0 3 1
+
+
+    .. note:: minSup is treated as a support count when given as an integer and as a proportion of the number of database transactions when given as a float.
+
+
+    **Sample run of the importing code**:
+    --------------------------------------------
+    .. code-block:: python
+
+        import PAMI.faultTolerantFrequentPattern.basic.VBFTMine as alg
+
+        obj = alg.VBFTMine(iFile, minSup, itemSup, minLength, faultTolerance)
+
+        obj.startMine()
+
+        faultTolerantFrequentPattern = obj.getPatterns()
+
+        print("Total number of Fault-Tolerant Frequent Patterns:", len(faultTolerantFrequentPattern))
+
+        obj.save(oFile)
+
+        Df = obj.getPatternsAsDataFrame()
+
+        print("Total Memory in USS:", obj.getMemoryUSS())
+
+        print("Total Memory in RSS", obj.getMemoryRSS())
+
+        print("Total ExecutionTime in seconds:", obj.getRuntime())
+
+    **Credits**:
+    ------------
+    The complete program was written by P. Likhitha under the supervision of Professor Rage Uday Kiran.
+ + """ + + _minSup = float() + _itemSup = float() + _minLength = int() + _faultTolerance = int() + _startTime = float() + _endTime = float() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _sep = " " + _plist = [] + _memoryUSS = float() + _memoryRSS = float() + _Database = [] + _mapSupport = {} + + def _creatingItemSets(self): + """ + Storing the complete transactions of the database/input file in a database variable + """ + self._Database = [] + if isinstance(self._iFile, _ab._pd.DataFrame): + temp = [] + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'Transactions' in i: + temp = self._iFile['Transactions'].tolist() + + for k in temp: + self._Database.append(set(k)) + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(set(temp)) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line.strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + for i in temp: + if i not in self._plist: + self._plist.append(i) + self._Database.append(set(temp)) + except IOError: + print("File Not Found") + quit() + + def _convert(self, value): + """ + To convert the user specified minSup value + + :param value: user specified minSup value + :return: converted type + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self._Database) * value) + if type(value) is str: + if '.' in value: + value = float(value) + value = (len(self._Database) * value) + else: + value = int(value) + return value + + def _Count(self, tids): + count = 0 + for i in tids: + if i == 1: + count += 1 + return count + + def _save(self, prefix, suffix, tidsetx): + if (prefix == None): + prefix = suffix + else: + prefix = prefix + suffix + prefix = list(set(prefix)) + prefix.sort() + val = self._Count(tidsetx) + if len(prefix) > self._faultTolerance: + self._finalPatterns[tuple(prefix)] = val + + def _processEquivalenceClass(self, prefix, itemsets, tidsets): + if (len(itemsets) == 1): + i = itemsets[0] + tidi = tidsets[0] + self._save(prefix, [i], tidi) + return + for i in range(len(itemsets)): + itemx = itemsets[i] + if (itemx == None): + continue + tidsetx = tidsets[i] + classItemsets = [] + classtidsets = [] + itemsetx = [itemx] + for j in range(i + 1, len(itemsets)): + itemj = itemsets[j] + tidsetj = tidsets[j] + y = list(_np.array(tidsetx) & _np.array(tidsetj)) + total = self._Count(y) + if total >= self._minSup: + classItemsets.append(itemj) + classtidsets.append(y) + if (len(classItemsets) > 0): + newprefix = list(set(itemsetx)) + prefix + self._processEquivalenceClass(newprefix, classItemsets, classtidsets) + self._save(prefix, list(set(itemsetx)), tidsetx) + + def _oneLengthFrequentItems(self): + """ + To calculate the one Length items + """ + Vector = {} + items = [] + for i in self._Database: + for j in self._plist: + count = 0 + if j in i: + count = 1 + if j in Vector: + Vector[j].append(count) + else: + Vector[j] = [count] + for x, y in Vector.items(): + v = self._Count(y) + if v >= self._itemSup: + items.append(x) + return Vector, items + +
+[docs] + @deprecated("It is recommended to use mine() instead of startMine() for mining process") + def startMine(self): + """ + Frequent pattern mining process will start from here + """ + self.mine()
+ + +
+[docs] + def mine(self): + """ + Frequent pattern mining process will start from here + """ + self._Database = [] + self._startTime = _ab._time.time() + self._creatingItemSets() + self._minSup = self._convert(self._minSup) + self._itemSup = self._convert(self._itemSup) + self._minLength = int(self._minLength) + self._faultTolerance = int(self._faultTolerance) + Vector, plist = self._oneLengthFrequentItems() + for i in range(len(plist)): + itemx = plist[i] + tidsetx = Vector[itemx] + itemsetx = [itemx] + itemsets = [] + tidsets = [] + for j in range(i + 1, len(plist)): + itemj = plist[j] + tidsetj = Vector[itemj] + y1 = list(_np.array(tidsetx) | _np.array(tidsetj)) + total = self._Count(y1) + if total >= self._minSup: + itemsets.append(itemj) + tidsets.append(y1) + if (len(itemsets) > 0): + self._processEquivalenceClass(itemsetx, itemsets, tidsets) + self._save(None, itemsetx, tidsetx) + self._endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = float() + self._memoryRSS = float() + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss + print("Fault-Tolerant Frequent patterns were generated successfully using VBFTMine algorithm ")
+ + +
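+    # Illustrative sketch of the bit-vector test used in mine() above: the
+    # support of a fault-tolerant candidate is the popcount of the OR of the
+    # item vectors, so a transaction counts if it contains at least one item
+    # of the pattern.
+    #
+    #   import numpy as np
+    #   a = np.array([1, 0, 1, 0])   # item A occurs in transactions 0 and 2
+    #   b = np.array([0, 1, 1, 0])   # item B occurs in transactions 1 and 2
+    #   int((a | b).sum())           # 3 transactions support the pattern {A, B}
+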
+[docs] + def getMemoryUSS(self): + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self): + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self): + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self): + """ + Storing final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataFrame = {} + data = [] + for a, b in self._finalPatterns.items(): + s = str() + for i in a: + s = s + i + ' ' + data.append([s, b]) + dataFrame = _ab._pd.DataFrame(data, columns=['Patterns', 'Support']) + # dataFrame = dataFrame.replace(r'\r+|\n+|\t+',' ', regex=True) + return dataFrame
+ + +
+[docs] + def save(self, outFile): + """ + Complete set of frequent patterns will be loaded in to an output file + + :param outFile: name of the output file + :type outFile: file + """ + self._oFile = outFile + writer = open(self._oFile, 'w+') + for x, y in self._finalPatterns.items(): + s = str() + for i in x: + s = s + i + '\t' + s1 = s.strip() + ":" + str(y) + writer.write("%s \n" % s1)
+ + +
+[docs] + def getPatterns(self): + """ + Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs] + def printResults(self): + """ + This function is used to print the results + """ + print("Total number of Frequent Patterns:", len(self.getPatterns())) + print("Total Memory in USS:", self.getMemoryUSS()) + print("Total Memory in RSS", self.getMemoryRSS()) + print("Total ExecutionTime in ms:", self.getRuntime())
+
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_ab._sys.argv) == 7 or len(_ab._sys.argv) == 8:
+        if len(_ab._sys.argv) == 8:
+            _ap = VBFTMine(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4],
+                           _ab._sys.argv[5], _ab._sys.argv[6], _ab._sys.argv[7])
+        if len(_ab._sys.argv) == 7:
+            _ap = VBFTMine(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5], _ab._sys.argv[6])
+        # mine() is called once; startMine() is only a deprecated wrapper around it.
+        _ap.mine()
+        print("Total number of Frequent Patterns:", len(_ap.getPatterns()))
+        _ap.save(_ab._sys.argv[2])
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS", _ap.getMemoryRSS())
+        print("Total ExecutionTime in ms:", _ap.getRuntime())
+    else:
+        print("Error! The number of input parameters do not match the total number of parameters provided")
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/uncertainFrequentPattern/basic/CUFPTree.html b/sphinx/_build/html/_modules/PAMI/uncertainFrequentPattern/basic/CUFPTree.html new file mode 100644 index 000000000..2c452bc17 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/uncertainFrequentPattern/basic/CUFPTree.html @@ -0,0 +1,912 @@ + + + + + + PAMI.uncertainFrequentPattern.basic.CUFPTree — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +

Source code for PAMI.uncertainFrequentPattern.basic.CUFPTree

+# CUFPTree is one of the fundamental algorithms for discovering frequent patterns in an uncertain transactional database using a CUFP-Tree
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#
+#             from PAMI.uncertainFrequentPattern.basic import CUFPTree as alg
+#
+#             obj = alg.CUFPTree(iFile, minSup, sep)
+#
+#             obj.startMine()
+#
+#             frequentPatterns = obj.getPatterns()
+#
+#             print("Total number of Frequent Patterns:", len(frequentPatterns))
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+__copyright__ = """
+ Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+import pandas as pd
+from deprecated import deprecated
+
+from PAMI.uncertainFrequentPattern.basic import abstract as _ab
+from typing import List, Tuple
+
+
+_minSup = str()
+_ab._sys.setrecursionlimit(20000)
+_finalPatterns = {}
+
+
+class _Item:
+    """
+    A class used to represent an item and its existential probability in a transaction of the dataset
+
+    :Attributes:
+
+        item : int or word
+            Represents the name of the item
+        probability : float
+            Represent the existential probability(likelihood presence) of an item
+    """
+
+    def __init__(self, item, probability) -> None:
+        self.item = item
+        self.probability = probability
+
+
+class _Node(object):
+    """
+    A class used to represent the node of frequentPatternTree
+
+    :Attributes:
+
+        item : int
+            storing item of a node
+        probability : int
+            To maintain the expected support of node
+        parent : node
+            To maintain the parent of every node
+        children : list
+            To maintain the children of node
+
+    :Methods:
+
+        addChild(itemName)
+            storing the children to their respective parent nodes
+    """
+
+    def __init__(self, item, children) -> None:
+        self.item = item
+        self.probability = 1
+        self.children = children
+        self.parent = None
+
+    def addChild(self, node) -> None:
+        self.children[node.item] = node
+        node.parent = self
+
+
+class _Tree(object):
+    """
+    A class used to represent the frequentPatternGrowth tree structure
+
+    :Attributes:
+
+        root : Node
+            Represents the root node of the tree
+        summaries : dictionary
+            storing the nodes with same item name
+        info : dictionary
+            stores the support of items
+    :Methods:
+
+        addTransaction(transaction)
+            creating transaction as a branch in frequentPatternTree
+        addConditionalPattern(prefixPaths, supportOfItems)
+            construct the conditional tree for prefix paths
+        conditionalPatterns(Node)
+            generates the conditional patterns from tree for specific node
+        conditionalTransactions(prefixPaths,Support)
+            takes the prefix paths of a node and their supports, extracts the frequent items from the
+            prefix paths, and regenerates the prefix paths restricted to those frequent items
+        remove(Node)
+            removes the node from tree once after generating all the patterns respective to the node
+        generatePatterns(Node)
+            starts from the root node of the tree and mines the frequent patterns
+    """
+
+    def __init__(self) -> None:
+        self.root = _Node(None, {})
+        self.summaries = {}
+        self.info = {}
+
+    def addTransaction(self, transaction) -> None:
+        """
+        Add a transaction to the tree as a branch.
+        :param transaction : a single transaction of the database
+        :type transaction : list
+        :return: None
+        """
+
+        currentNode = self.root
+        for i in range(len(transaction)):
+            if transaction[i].item not in currentNode.children:
+                newNode = _Node(transaction[i].item, {})
+                l1 = i - 1
+                lp = []
+                while l1 >= 0:
+                    lp.append(transaction[l1].probability)
+                    l1 -= 1
+                if len(lp) == 0:
+                    newNode.probability = transaction[i].probability
+                else:
+                    newNode.probability = max(lp) * transaction[i].probability
+                currentNode.addChild(newNode)
+                if transaction[i].item in self.summaries:
+                    self.summaries[transaction[i].item].append(newNode)
+                else:
+                    self.summaries[transaction[i].item] = [newNode]
+                currentNode = newNode
+            else:
+                currentNode = currentNode.children[transaction[i].item]
+                l1 = i - 1
+                lp = []
+                while l1 >= 0:
+                    lp.append(transaction[l1].probability)
+                    l1 -= 1
+                if len(lp) == 0:
+                    currentNode.probability += transaction[i].probability
+                else:
+                    currentNode.probability += max(lp) * transaction[i].probability
+
+    def addConditionalPattern(self, transaction, sup) -> None:
+        """
+        Construct the conditional tree from a prefix path.
+        :param transaction : a prefix path to be inserted into the conditional tree
+        :type transaction : list
+        :param sup : support of prefixPath taken at last child of the path
+        :type sup : int
+        :return: None
+        """
+
+        # This method takes transaction, support and constructs the conditional tree
+        currentNode = self.root
+        for i in range(len(transaction)):
+            if transaction[i] not in currentNode.children:
+                newNode = _Node(transaction[i], {})
+                newNode.probability = sup
+                currentNode.addChild(newNode)
+                if transaction[i] in self.summaries:
+                    self.summaries[transaction[i]].append(newNode)
+                else:
+                    self.summaries[transaction[i]] = [newNode]
+                currentNode = newNode
+            else:
+                currentNode = currentNode.children[transaction[i]]
+                currentNode.probability += sup
+
+    def conditionalPatterns(self, alpha) -> Tuple[List, List, dict]:
+        """
+        generates all the conditional patterns of the given item
+        :param alpha : the item whose conditional patterns are generated from the tree
+        :type alpha : int or str
+        :return: Tuple
+        """
+
+        # This method generates conditional patterns of node by traversing the tree
+        finalPatterns = []
+        sup = []
+        for i in self.summaries[alpha]:
+            s = i.probability
+            set2 = []
+            while i.parent.item is not None:
+                set2.append(i.parent.item)
+                i = i.parent
+            if len(set2) > 0:
+                set2.reverse()
+                finalPatterns.append(set2)
+                sup.append(s)
+        finalPatterns, support, info = self.conditionalTransactions(finalPatterns, sup)
+        return finalPatterns, support, info
+
+    def removeNode(self, nodeValue) -> None:
+        """
+        removing the node from tree
+
+        :param nodeValue : it represents the node in tree
+        :type nodeValue : node
+        :return: None
+        """
+
+        for i in self.summaries[nodeValue]:
+            del i.parent.children[nodeValue]
+
+    def conditionalTransactions(self, condPatterns, support) -> Tuple[List, List, dict]:
+        """
+        It generates the conditional patterns with frequent items
+
+        :param condPatterns : conditionalPatterns generated from conditionalPattern method for respective node
+        :type condPatterns : list
+        :param support : the supports of the conditional patterns in the tree
+        :type support : list
+        :return: Tuple consist of patterns,support and updated Dictionary
+        :rtype: Tuple
+        """
+
+        global minSup
+        pat = []
+        sup = []
+        count = {}
+        for i in range(len(condPatterns)):
+            for j in condPatterns[i]:
+                if j in count:
+                    count[j] += support[i]
+                else:
+                    count[j] = support[i]
+        updatedDict = {k: v for k, v in count.items() if v >= minSup}
+        # Index supports by the original pattern position so that patterns that
+        # become empty after filtering do not shift the pattern/support pairing.
+        for idx, p in enumerate(condPatterns):
+            p1 = [v for v in p if v in updatedDict]
+            trans = sorted(p1, key=lambda x: updatedDict[x], reverse=True)
+            if len(trans) > 0:
+                pat.append(trans)
+                sup.append(support[idx])
+        return pat, sup, updatedDict
+
+    def generatePatterns(self, prefix) -> None:
+        """
+        Generates the patterns
+
+        :param prefix : forms the combination of items
+        :type prefix : list
+        :return: None
+        """
+
+        global _finalPatterns, minSup
+        for i in sorted(self.summaries, key=lambda x: (self.info.get(x))):
+            pattern = prefix[:]
+            pattern.append(i)
+            s = 0
+            for x in self.summaries[i]:
+                s += x.probability
+            _finalPatterns[tuple(pattern)] = self.info[i]
+            if s >= minSup:
+                patterns, support, info = self.conditionalPatterns(i)
+                conditionalTree = _Tree()
+                conditionalTree.info = info.copy()
+                for pat in range(len(patterns)):
+                    conditionalTree.addConditionalPattern(patterns[pat], support[pat])
+                if len(patterns) > 0:
+                    conditionalTree.generatePatterns(pattern)
+            self.removeNode(i)
+
+
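+# Illustrative sketch (not executed by the algorithm): the expected-support
+# update in _Tree.addTransaction() scales a node's probability by the largest
+# probability seen earlier in the (support-ordered) transaction:
+#
+#   tree = _Tree()
+#   tree.addTransaction([_Item('a', 0.9), _Item('b', 0.6)])
+#   tree.root.children['a'].probability                 # 0.9
+#   tree.root.children['a'].children['b'].probability   # 0.54 = 0.9 * 0.6
+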
+[docs] +class CUFPTree(_ab._frequentPatterns): + + """ + :Description: It is one of the fundamental algorithm to discover frequent patterns in a uncertain transactional database using CUFP-Tree. + + :Reference: + Chun-Wei Lin Tzung-PeiHong, 'new mining approach for uncertain databases using CUFP trees', + Expert Systems with Applications, Volume 39, Issue 4, March 2012, Pages 4084-4093, https://doi.org/10.1016/j.eswa.2011.09.087 + + :param iFile: str : + Name of the Input file to mine complete set of Uncertain Frequent Patterns + :param oFile: str : + Name of the output file to store complete set of Uncertain frequent patterns + :param minSup: int or float or str : + minimum support thresholds were tuned to find the appropriate ranges in the limited memory + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + + + :Attributes: + + iFile : file + Name of the Input file or path of the input file + oFile : file + Name of the output file or path of the output file + minSup: float or int or str + The user can specify minSup either in count or proportion of database size. + If the program detects the data type of minSup is integer, then it treats minSup is expressed in count. + Otherwise, it will be treated as float. + Example: minSup=10 will be treated as integer, while minSup=10.0 will be treated as float + sep : str + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space or \t. + However, the users can override their default separator. + memoryUSS : float + To store the total amount of USS memory consumed by the program + memoryRSS : float + To store the total amount of RSS memory consumed by the program + startTime:float + To record the start time of the mining process + endTime:float + To record the completion time of the mining process + Database : list + To store the transactions of a database in list + mapSupport : Dictionary + To maintain the information of item and their frequency + lno : int + To represent the total no of transaction + tree : class + To represents the Tree class + itemSetCount : int + To represents the total no of patterns + finalPatterns : dict + To store the complete patterns + + :Methods: + + startMine() + Mining process will start from here + getPatterns() + Complete set of patterns will be retrieved with this function + save(oFile) + Complete set of frequent patterns will be loaded in to a output file + getPatternsAsDataFrame() + Complete set of frequent patterns will be loaded in to a dataframe + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function + creatingItemSets(fileName) + Scans the dataset and stores in a list format + frequentOneItem() + Extracts the one-length frequent patterns from database + updateTransactions() + Update the transactions by removing non-frequent items and sort the Database by item decreased support + buildTree() + After updating the Database, remaining items will be added into the tree by setting root node as null + convert() + to convert the user specified value + startMine() + Mining process will start from this function + + **Methods to execute code on 
terminal** + -------------------------------------------- + + .. code-block:: console + + + Format: + + (.venv) $ python3 CUFPTree.py <inputFile> <outputFile> <minSup> + + Example Usage: + + (.venv) $ python3 CUFPTree.py sampleTDB.txt patterns.txt 3 + + + + .. note:: minSup will be considered in support count or frequency + + **Importing this algorithm into a python program** + ---------------------------------------------------- + .. code-block:: python + + from PAMI.uncertainFrequentPattern.basic import CUFPTree as alg + + obj = alg.CUFPTree(iFile, minSup) + + obj.mine() + + frequentPatterns = obj.getPatterns() + + print("Total number of Frequent Patterns:", len(frequentPatterns)) + + obj.save(oFile) + + Df = obj.getPatternsAsDataFrame() + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + **Credits:** + -------------- + + The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran. +""" + _startTime = float() + _endTime = float() + _minSup = str() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _sep = " " + _memoryUSS = float() + _memoryRSS = float() + _Database = [] + _rank = {} + + def __init__(self, iFile, minSup, sep='\t') -> None: + super().__init__(iFile, minSup, sep) + + def _creatingItemSets(self) -> None: + """ + Scans the uncertain transactional dataset + :return: None + """ + self._Database = [] + if isinstance(self._iFile, _ab._pd.DataFrame): + uncertain, data = [], [] + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'Transactions' in i: + data = self._iFile['Transactions'].tolist() + if 'uncertain' in i: + uncertain = self._iFile['uncertain'].tolist() + for k in range(len(data)): + tr = [] + for j in range(len(data[k])): + product = _Item(data[k][j], uncertain[k][j]) + tr.append(product) + self._Database.append(tr) + + # print(self.Database) + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + tr = [] + for i in temp: + i1 = i.index('(') + i2 = i.index(')') + item = i[0:i1] + probability = float(i[i1 + 1:i2]) + product = _Item(item, probability) + tr.append(product) + self._Database.append(tr) + else: + try: + with open(self._iFile, 'r') as f: + for line in f: + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + tr = [] + for i in temp: + i1 = i.index('(') + i2 = i.index(')') + item = i[0:i1] + probability = float(i[i1 + 1:i2]) + product = _Item(item, probability) + tr.append(product) + self._Database.append(tr) + except IOError: + print("File Not Found") + + def _frequentOneItem(self) -> Tuple[dict, List]: + """ + Takes self._Database, calculates the expected support of each item in the dataset, assigns ranks to the items in decreasing order of support, and returns the frequent items list + + :return: tuple + """ + + mapSupport = {} + for i in self._Database: + for j in i: + if j.item not in mapSupport: + mapSupport[j.item] = j.probability + else: + mapSupport[j.item] += j.probability + mapSupport = {k: v for k, v in mapSupport.items() if v >= self._minSup} + plist 
= [k for k, v in sorted(mapSupport.items(), key=lambda x: x[1], reverse=True)] + self._rank = dict([(index, item) for (item, index) in enumerate(plist)]) + return mapSupport, plist + + @staticmethod + def _buildTree(data, info) -> '_Tree': + """ + It takes the transactions and the support of each item and constructs the main tree by setting the root node as null + + :param data : it represents the transactions of the database + :type data : list + :param info : it represents the support of each item + :type info : dictionary + :return: Tree + """ + + rootNode = _Tree() + rootNode.info = info.copy() + for i in range(len(data)): + rootNode.addTransaction(data[i]) + return rootNode + + def _updateTransactions(self, dict1) -> List: + """ + Removes the items which are not frequent from the transactions and updates the transactions with the rank of items + + :param dict1 : frequent items with support + :type dict1 : dictionary + :return: list + """ + + list1 = [] + for tr in self._Database: + list2 = [] + for i in range(0, len(tr)): + if tr[i].item in dict1: + list2.append(tr[i]) + if len(list2) >= 2: + basket = list2 + basket.sort(key=lambda val: self._rank[val.item]) + list2 = basket + list1.append(list2) + return list1 + + @staticmethod + def _check(i, x) -> int: + """ + To check the presence of an item or pattern in a transaction + + :param x: it represents the pattern + :type x : list + :param i : represents one uncertain transaction + :type i : list + :return: int + """ + + # This method takes a transaction as input and checks whether every item of the pattern occurs in it + for m in x: + k = 0 + for n in i: + if m == n.item: + k += 1 + if k == 0: + return 0 + return 1 + + def _convert(self, value) -> float: + """ + To convert the type of the user specified minSup value + + :param value: user specified minSup value + :return: converted type minSup value + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self._Database) * value) + if type(value) is str: + if '.' in value: + value = (len(self._Database) * float(value)) + else: + value = int(value) + return value + + def _removeFalsePositives(self) -> None: + """ + + To remove the false positive patterns generated among the frequent patterns by recomputing their exact expected support. + + :return: None + """ + global _finalPatterns + periods = {} + for i in self._Database: + for x, y in _finalPatterns.items(): + if len(x) == 1: + periods[x] = y + else: + s = 1 + check = self._check(i, x) + if check == 1: + for j in i: + if j.item in x: + s *= j.probability + if x in periods: + periods[x] += s + else: + periods[x] = s + for x, y in periods.items(): + if y >= self._minSup: + sample = str() + for i in x: + sample = sample + i + "\t" + self._finalPatterns[sample] = y + +
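+    # Illustrative note (not part of the original PAMI source): `_creatingItemSets`
+    # expects each input line to encode an uncertain transaction as item(probability)
+    # tokens. A minimal sketch of the same parsing logic on a hypothetical line:
+    #
+    #     line = "a(0.8)\tb(0.5)\tc(0.3)"
+    #     for token in line.split('\t'):
+    #         item = token[:token.index('(')]                                    # e.g. "a"
+    #         probability = float(token[token.index('(') + 1:token.index(')')])  # e.g. 0.8
+    #
+    # `_frequentOneItem` then sums these probabilities per item across transactions
+    # (the expected support), and `_removeFalsePositives` replaces the tree's
+    # over-estimated support of a pattern such as {a, b} with the exact sum of
+    # per-transaction products (0.8 * 0.5 = 0.4 for the line above).
+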
+[docs] + @deprecated("It is recommended to use mine() instead of startMine() for mining process") + def startMine(self) -> None: + """ + Main method where the patterns are mined by constructing the tree and false patterns are removed by computing the exact support of the patterns. + :return: None + """ + self.mine()
+ + +
+[docs] + def mine(self) -> None: + """ + Main method where the patterns are mined by constructing the tree and false patterns are removed by computing the exact support of the patterns. + :return: None + """ + global minSup + self._startTime = _ab._time.time() + self._creatingItemSets() + self._minSup = self._convert(self._minSup) + minSup = self._minSup + self._finalPatterns = {} + mapSupport, plist = self._frequentOneItem() + self.Database1 = self._updateTransactions(mapSupport) + info = {k: v for k, v in mapSupport.items()} + Tree1 = self._buildTree(self.Database1, info) + Tree1.generatePatterns([]) + self._removeFalsePositives() + print("Uncertain Frequent patterns were successfully generated using CUFPTree algorithm") + self._endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = float() + self._memoryRSS = float() + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss
+ + + +
+[docs] + def getMemoryUSS(self) -> float: + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self) -> float: + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self) -> float: + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self) -> _ab._pd.DataFrame: + """ + Storing final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataframe = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a.replace('\t', ' '), b]) + dataframe = _ab._pd.DataFrame(data, columns=['Patterns', 'Support']) + return dataframe
+ + +
+[docs] + def save(self, outFile: str) -> None: + """ + Complete set of frequent patterns will be loaded into an output file + + :param outFile: name of the output file + :type outFile: csv file + :return: None + """ + self._oFile = outFile + with open(self._oFile, 'w+') as writer: + for x, y in self._finalPatterns.items(): + s1 = x.strip() + ":" + str(y) + writer.write("%s \n" % s1)
+ + +
+[docs] + def getPatterns(self) -> dict: + """ + Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs] + def printResults(self) -> None: + """ + This function is used to print the results + :return: None + """ + print("Total number of Uncertain Frequent Patterns:", len(self.getPatterns())) + print("Total Memory in USS:", self.getMemoryUSS()) + print("Total Memory in RSS", self.getMemoryRSS()) + print("Total ExecutionTime in seconds:", self.getRuntime())
+
+ + + +if __name__ == "__main__": + _ap = str() + if len(_ab._sys.argv) == 4 or len(_ab._sys.argv) == 5: + if len(_ab._sys.argv) == 5: + _ap = CUFPTree(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4]) + if len(_ab._sys.argv) == 4: + _ap = CUFPTree(_ab._sys.argv[1], _ab._sys.argv[3]) + _ap.mine() + print("Total number of Uncertain Frequent Patterns:", len(_ap.getPatterns())) + _ap.save(_ab._sys.argv[2]) + print("Total Memory in USS:", _ap.getMemoryUSS()) + print("Total Memory in RSS", _ap.getMemoryRSS()) + print("Total ExecutionTime in seconds:", _ap.getRuntime()) + else: + print("Error! The number of input parameters does not match the total number of parameters provided") +
\ No newline at end of file
diff --git a/sphinx/_build/html/_modules/PAMI/uncertainGeoreferencedFrequentPattern/basic/GFPGrowth.html b/sphinx/_build/html/_modules/PAMI/uncertainGeoreferencedFrequentPattern/basic/GFPGrowth.html
new file mode 100644
index 000000000..da005ca49
--- /dev/null
+++ b/sphinx/_build/html/_modules/PAMI/uncertainGeoreferencedFrequentPattern/basic/GFPGrowth.html
@@ -0,0 +1,957 @@
+PAMI.uncertainGeoreferencedFrequentPattern.basic.GFPGrowth — PAMI 2024.04.23 documentation

Source code for PAMI.uncertainGeoreferencedFrequentPattern.basic.GFPGrowth

+# GFPGrowth algorithm is used to discover geo-referenced frequent patterns in an uncertain transactional database using a GFP-Tree.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#
+#             from PAMI.uncertainGeoreferencedFrequentPattern.basic import GFPGrowth as alg
+#
+#             obj = alg.GFPGrowth(iFile, nFile, minSup,sep, oFile)
+#
+#             obj.mine()
+#
+#             Patterns = obj.getPatterns()
+#
+#             print("Total number of  Patterns:", len(Patterns))
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+__copyright__ = """
+ Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+from PAMI.uncertainGeoreferencedFrequentPattern.basic import abstract as _ab
+import pandas as pd
+from deprecated import deprecated
+
+_minSup = str()
+_neighbourList = {}
+_ab._sys.setrecursionlimit(20000)
+_finalPatterns = {}
+
+
+class _Item:
+    """
+    A class used to represent an item with its probability in a transaction of the dataset
+
+    :Attributes:
+
+        item : int or str
+            Represents the name of the item
+        probability : float
+            Represents the existential probability (likelihood of presence) of an item
+    """
+
+    def __init__(self, item, probability):
+        self.item = item
+        self.probability = probability
+
+
+class _Node(object):
+    """
+    A class used to represent the node of frequentPatternTree
+
+    :Attributes:
+
+        item : int
+            storing item of a node
+        probability : float
+            To maintain the expected support of a node
+        parent : node
+            To maintain the parent of every node
+        children : list
+            To maintain the children of node
+
+    :Methods:
+
+        addChild(itemName)
+            storing the children to their respective parent nodes
+    """
+
+    def __init__(self, item, children):
+        self.item = item
+        self.probability = 1
+        self.children = children
+        self.parent = None
+
+    def addChild(self, node):
+        self.children[node.item] = node
+        node.parent = self
+
+
+class _Tree(object):
+    """
+    A class used to represent the frequentPatternGrowth tree structure
+
+    :Attributes:
+
+        root : Node
+            Represents the root node of the tree
+        summaries : dictionary
+            storing the nodes with same item name
+        info : dictionary
+            stores the support of items
+    :Methods:
+
+        addTransaction(transaction)
+            creating transaction as a branch in frequentPatternTree
+        addConditionalPattern(prefixPaths, supportOfItems)
+            construct the conditional tree for prefix paths
+        conditionalPatterns(Node)
+            generates the conditional patterns from tree for specific node
+        conditionalTransactions(prefixPaths,Support)
+            takes the prefixPath of a node and support at child of the path and extract the frequent items from
+            prefixPaths and generates prefixPaths with items which are frequent
+        remove(Node)
+            removes the node from tree once after generating all the patterns respective to the node
+        generatePatterns(Node)
+            starts from the root node of the tree and mines the frequent patterns
+    """
+
+    def __init__(self):
+        self.root = _Node(None, {})
+        self.summaries = {}
+        self.info = {}
+
+    def addTransaction(self, transaction):
+        """
+        Adding transaction into tree
+
+        :param transaction : it represents one transaction in the database
+        :type transaction : list
+        """
+        global _neighbourList
+        currentNode = self.root
+        for i in range(len(transaction)):
+            if transaction[i].item not in currentNode.children:
+                newNode = _Node(transaction[i].item, {})
+                nei = _neighbourList.get(transaction[i].item)
+                l1 = i - 1
+                lp = []
+                while l1 >= 0:
+                    if nei is None:
+                        break
+                    if transaction[l1].item in nei:
+                        lp.append(transaction[l1].probability)
+                    l1 -= 1
+                if len(lp) == 0:
+                    newNode.probability = transaction[i].probability
+                else:
+                    newNode.probability = max(lp) * transaction[i].probability
+                currentNode.addChild(newNode)
+                if transaction[i].item in self.summaries:
+                    self.summaries[transaction[i].item].append(newNode)
+                else:
+                    self.summaries[transaction[i].item] = [newNode]
+                currentNode = newNode
+            else:
+                currentNode = currentNode.children[transaction[i].item]
+                l1 = i - 1
+                lp = []
+                while l1 >= 0:
+                    lp.append(transaction[l1].probability)
+                    l1 -= 1
+                if len(lp) == 0:
+                    currentNode.probability += transaction[i].probability
+                else:
+                    currentNode.probability += max(lp) * transaction[i].probability
+
+    def addConditionalPattern(self, transaction, sup):
+        """
+        constructing conditional tree from prefixPaths
+
+        :param transaction : it represents one transaction in the database
+        :type transaction : list
+        :param sup : support of prefixPath taken at last child of the path
+        :type sup : int
+        """
+
+        # This method takes transaction, support and constructs the conditional tree
+        currentNode = self.root
+        for i in range(len(transaction)):
+            if transaction[i] not in currentNode.children:
+                newNode = _Node(transaction[i], {})
+                newNode.probability = sup
+                currentNode.addChild(newNode)
+                if transaction[i] in self.summaries:
+                    self.summaries[transaction[i]].append(newNode)
+                else:
+                    self.summaries[transaction[i]] = [newNode]
+                currentNode = newNode
+            else:
+                currentNode = currentNode.children[transaction[i]]
+                currentNode.probability += sup
+
+    def conditionalPatterns(self, alpha):
+        """
+        Generates all the conditional patterns of respective node
+
+        :param alpha : it represents an item stored in the tree summaries
+        :type alpha : item
+        """
+
+        # This method generates conditional patterns of node by traversing the tree
+        global _neighbourList
+        finalPatterns = []
+        sup = []
+        for i in self.summaries[alpha]:
+            j = i.item
+            s = i.probability
+            set2 = []
+            while i.parent.item is not None:
+                if _neighbourList.get(j) is not None:
+                    #print(_neighbourList.get(j))
+                    if i.parent.item in _neighbourList[j]:
+                        set2.append(i.parent.item)
+                i = i.parent
+            if len(set2) > 0:
+                set2.reverse()
+                finalPatterns.append(set2)
+                sup.append(s)
+        finalPatterns, support, info = self.conditionalTransactions(finalPatterns, sup)
+        return finalPatterns, support, info
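+
+    # Illustrative note (not part of the original PAMI source): `conditionalPatterns`
+    # keeps only those ancestors that are neighbours of the suffix item. Hedged
+    # example with hypothetical items: for a branch root -> a -> c -> b with
+    # _neighbourList['b'] = ['a'], the prefix extracted for b is ['a'] (c is skipped),
+    # paired with the probability of b's node as its support.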
+
+    def removeNode(self, nodeValue):
+        """
+        Removing the node from tree
+
+        :param nodeValue : it represents an item stored in the tree summaries
+        :type nodeValue : item
+        """
+
+        for i in self.summaries[nodeValue]:
+            del i.parent.children[nodeValue]
+
+    def conditionalTransactions(self, condPatterns, support):
+        """
+        It generates the conditional patterns with frequent items
+
+        :param condPatterns : conditionalPatterns generated from the conditionalPatterns method for the respective node
+        :type condPatterns : list
+        :param support : the supports of the conditional patterns in the tree
+        :type support : list
+        """
+
+        global minSup
+        pat = []
+        sup = []
+        count = {}
+        for i in range(len(condPatterns)):
+            for j in condPatterns[i]:
+                if j in count:
+                    count[j] += support[i]
+                else:
+                    count[j] = support[i]
+        updatedDict = {k: v for k, v in count.items() if v >= minSup}
+        count = 0
+        for p in condPatterns:
+            p1 = [v for v in p if v in updatedDict]
+            trans = sorted(p1, key=lambda x: updatedDict[x], reverse=True)
+            if len(trans) > 0:
+                pat.append(trans)
+                sup.append(support[count])
+            count += 1
+        return pat, sup, updatedDict
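+
+    # Illustrative note (not part of the original PAMI source): a small worked example
+    # of `conditionalTransactions`, assuming minSup = 0.5. For
+    # condPatterns = [['a', 'b'], ['a']] with support = [0.4, 0.3], the accumulated
+    # counts are a: 0.7 and b: 0.4, so updatedDict keeps only 'a'; both patterns
+    # shrink to ['a'] and are returned with their original supports [0.4, 0.3].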
+
+    def generatePatterns(self, prefix):
+        """
+        Generates the patterns
+
+        :param prefix : forms the combination of items
+        :type prefix : list
+        """
+
+        global _finalPatterns, minSup
+        for i in sorted(self.summaries, key=lambda x: (self.info.get(x))):
+            pattern = prefix[:]
+            pattern.append(i)
+            s = 0
+            for x in self.summaries[i]:
+                s += x.probability
+            _finalPatterns[tuple(pattern)] = self.info[i]
+            if s >= minSup:
+                patterns, support, info = self.conditionalPatterns(i)
+                conditionalTree = _Tree()
+                conditionalTree.info = info.copy()
+                for pat in range(len(patterns)):
+                    conditionalTree.addConditionalPattern(patterns[pat], support[pat])
+                if len(patterns) > 0:
+                    conditionalTree.generatePatterns(pattern)
+            self.removeNode(i)
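+
+    # Illustrative note (not part of the original PAMI source): in `generatePatterns`
+    # the expected support of an item is the sum of the probabilities of all of its
+    # tree nodes. Hedged example: if item 'a' occurs in two nodes with probabilities
+    # 0.4 and 0.3, then s = 0.7; a conditional tree for 'a' is built and mined only
+    # when s >= minSup, after which 'a' is pruned from this tree via removeNode.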
+
+
+
+[docs] +class GFPGrowth(_ab._frequentPatterns): + """ + :Description: GFPGrowth algorithm is used to discover geo-referenced frequent patterns in an uncertain transactional database using a GFP-Tree. + + :Reference: + Palla Likhitha, Pamalla Veena, Rage Uday Kiran, Koji Zettsu (2023). + "Discovering Geo-referenced Frequent Patterns in Uncertain Geo-referenced + Transactional Databases". PAKDD 2023. + https://doi.org/10.1007/978-3-031-33380-4_3 + + :param iFile: str : + Name of the Input file to mine the complete set of uncertain Geo referenced Frequent Patterns + :param oFile: str : + Name of the output file to store the complete set of Uncertain Geo referenced frequent patterns + :param minSup: str: + The user-specified minimum support threshold + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator. + + :Attributes: + + iFile : file + Name of the Input file or path of the input file + oFile : file + Name of the output file or path of the output file + minSup: float or int or str + The user can specify minSup either in count or proportion of database size. + If the program detects the data type of minSup is integer, then it treats minSup as expressed in count. + Otherwise, it will be treated as float. + Example: minSup=10 will be treated as integer, while minSup=10.0 will be treated as float + sep : str + This variable is used to distinguish items from one another in a transaction. The default separator is tab space or \t. + However, the users can override the default separator. + memoryUSS : float + To store the total amount of USS memory consumed by the program + memoryRSS : float + To store the total amount of RSS memory consumed by the program + startTime : float + To record the start time of the mining process + endTime : float + To record the completion time of the mining process + Database : list + To store the transactions of a database in list + mapSupport : Dictionary + To maintain the information of items and their frequency + lno : int + To represent the total number of transactions + tree : class + To represent the Tree class + itemSetCount : int + To represent the total number of patterns + finalPatterns : dict + To store the complete patterns + + :Methods: + + startMine() + Mining process will start from here + getPatterns() + Complete set of patterns will be retrieved with this function + save(oFile) + Complete set of frequent patterns will be loaded into an output file + getPatternsAsDataFrame() + Complete set of frequent patterns will be loaded into a dataframe + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function + creatingItemSets(fileName) + Scans the dataset and stores it in a list format + frequentOneItem() + Extracts the one-length frequent patterns from the database + updateTransactions() + Updates the transactions by removing non-frequent items and sorts the Database in decreasing order of item support + buildTree() + After updating the Database, remaining items will be added into the tree by setting the root node as null + convert() + To convert the user specified value + mine() + Mining process will start from this function + + 
**Executing the code on terminal**: + ------------------------------------------ + + .. code-block:: console + + + Format: + + (.venv) $ python3 GFPGrowth.py <inputFile> <neighborFile> <outputFile> <minSup> + + Example usage: + + (.venv) $ python3 GFPGrowth.py sampleTDB.txt sampleNeighbor.txt patterns.txt 3 + + + .. note:: minSup will be considered in support count or frequency + + **Sample run of importing the code**: + ---------------------------------------- + .. code-block:: python + + from PAMI.uncertainGeoreferencedFrequentPattern.basic import GFPGrowth as alg + + obj = alg.GFPGrowth(iFile, nFile, minSup) + + obj.mine() + + Patterns = obj.getPatterns() + + print("Total number of Patterns:", len(Patterns)) + + obj.save(oFile) + + Df = obj.getPatternsAsDataFrame() + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + **Credits**: + ------------- + The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.\n + """ + _startTime = float() + _endTime = float() + _minSup = str() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _sep = " " + _memoryUSS = float() + _memoryRSS = float() + _Database = [] + _rank = {} + + def __init__(self, iFile, nFile, minSup, sep='\t'): + super().__init__(iFile, nFile, minSup, sep) + + def _creatingItemSets(self): + """ + Scans the uncertain transactional dataset + """ + self._Database = [] + if isinstance(self._iFile, _ab._pd.DataFrame): + uncertain, data = [], [] + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'Transactions' in i: + data = self._iFile['Transactions'].tolist() + if 'uncertain' in i: + uncertain = self._iFile['uncertain'].tolist() + for k in range(len(data)): + tr = [] + for j in range(len(data[k])): + product = _Item(data[k][j], uncertain[k][j]) + tr.append(product) + self._Database.append(tr) + + # print(self.Database) + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + temp1 = line.strip() + temp1 = temp1.split(':') + temp = [i.rstrip() for i in temp1[0].split(self._sep)] + uncertain = [float(i.rstrip()) for i in temp1[1].split(self._sep)] + tr = [] + for i in range(len(temp)): + item = temp[i] + probability = uncertain[i] + product = _Item(item, probability) + tr.append(product) + self._Database.append(tr) + else: + try: + with open(self._iFile, 'r') as f: + for line in f: + temp1 = line.strip() + temp1 = temp1.split(':') + #temp1[0], temp1[1] = [i for i in temp1[0] if i], [i for i in temp1[1] if i] + temp = [i.rstrip() for i in temp1[0].split(self._sep) if i] + uncertain = [float(i.rstrip()) for i in temp1[1].split(self._sep) if i] + tr = [] + for i in range(len(temp)): + item = temp[i] + probability = uncertain[i] + product = _Item(item, probability) + tr.append(product) + self._Database.append(tr) + except IOError: + print("File Not Found") + + def _creatingNeighbours(self): + """ + Scans the neighbourhood file and stores the neighbours of each item + """ + global _neighbourList + _neighbourList = {} + if isinstance(self._nFile, _ab._pd.DataFrame): + uncertain, data = [], [] + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'Transactions' in i: + data = self._iFile['Transactions'].tolist() + if 'uncertain' in i: + uncertain = 
self._iFile['uncertain'].tolist() + for k in range(len(data)): + tr = [] + for j in range(len(data[k])): + product = _Item(data[k][j], uncertain[k][j]) + tr.append(product) + self._Database.append(tr) + + # print(self.Database) + if isinstance(self._nFile, str): + if _ab._validators.url(self._nFile): + data = _ab._urlopen(self._nFile) + for line in data: + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + _neighbourList[temp[0]] = temp[1:] + else: + try: + with open(self._nFile, 'r') as f: + for line in f: + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + _neighbourList[temp[0]] = temp[1:] + except IOError: + print("File Not Found") + + def _frequentOneItem(self): + """ + Takes self._Database, calculates the expected support of each item in the dataset, assigns ranks to the items in decreasing order of support, and returns the frequent items list + """ + + mapSupport = {} + for i in self._Database: + for j in i: + if j.item not in mapSupport: + mapSupport[j.item] = j.probability + else: + mapSupport[j.item] += j.probability + mapSupport = {k: v for k, v in mapSupport.items() if v >= self._minSup} + plist = [k for k, v in sorted(mapSupport.items(), key=lambda x: x[1], reverse=True)] + self._rank = dict([(index, item) for (item, index) in enumerate(plist)]) + return mapSupport, plist + + @staticmethod + def _buildTree(data, info): + """ + It takes the transactions and the support of each item and constructs the main tree by setting the root node as null + + :param data : it represents the transactions of the database + :type data : list + :param info : it represents the support of each item + :type info : dictionary + """ + + rootNode = _Tree() + rootNode.info = info.copy() + for i in range(len(data)): + rootNode.addTransaction(data[i]) + return rootNode + + def _updateTransactions(self, dict1): + """ + Removes the items which are not frequent from the transactions and updates the transactions with the rank of items + + :param dict1 : frequent items with support + :type dict1 : dictionary + """ + + list1 = [] + for tr in self._Database: + list2 = [] + for i in range(0, len(tr)): + if tr[i].item in dict1: + list2.append(tr[i]) + if len(list2) >= 2: + basket = list2 + basket.sort(key=lambda val: self._rank[val.item]) + list2 = basket + list1.append(list2) + return list1 + + @staticmethod + def _check(i, x): + """ + To check the presence of an item or pattern in a transaction + + :param x: it represents the pattern + :type x : list + :param i : represents one uncertain transaction + :type i : list + """ + + # This method takes a transaction as input and checks whether every item of the pattern occurs in it + for m in x: + k = 0 + for n in i: + if m == n.item: + k += 1 + if k == 0: + return 0 + return 1 + + def _convert(self, value): + """ + To convert the type of the user specified minSup value + + :param value: user specified minSup value + :return: converted type minSup value + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self._Database) * value) + if type(value) is str: + if '.' 
in value: + value = (len(self._Database) * float(value)) + else: + value = int(value) + return value + + def _removeFalsePositives(self): + """ + To remove the false positive patterns generated among the frequent patterns by recomputing their exact expected support. + + :return: None + """ + global _finalPatterns + periods = {} + for i in self._Database: + for x, y in _finalPatterns.items(): + if len(x) == 1: + periods[x] = y + else: + s = 1 + check = self._check(i, x) + if check == 1: + for j in i: + if j.item in x: + s *= j.probability + if x in periods: + periods[x] += s + else: + periods[x] = s + for x, y in periods.items(): + if y >= self._minSup: + sample = str() + for i in x: + sample = sample + i + "\t" + self._finalPatterns[sample] = y + +
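+    # Illustrative note (not part of the original PAMI source): `_convert` interprets
+    # minSup relative to the database size. Assuming a database of 100 transactions:
+    # _convert(20) -> 20 (absolute count), _convert(0.25) -> 25.0 (proportion of the
+    # database), _convert("0.25") -> 25.0, and _convert("20") -> 20.
+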
+[docs] + @deprecated("It is recommended to use mine() instead of startMine() for mining process") + def startMine(self): + """ + Main method where the patterns are mined by constructing the tree and false patterns are removed by computing the exact support of the patterns + """ + self.mine()
+ + +
+[docs] + def mine(self): + """ + Main method where the patterns are mined by constructing the tree and false patterns are removed by computing the exact support of the patterns + """ + global minSup + self._startTime = _ab._time.time() + self._creatingItemSets() + self._creatingNeighbours() + # self._minSup = self._convert(self._minSup) + minSup = self._minSup + self._finalPatterns = {} + mapSupport, plist = self._frequentOneItem() + self.Database1 = self._updateTransactions(mapSupport) + info = {k: v for k, v in mapSupport.items()} + Tree1 = self._buildTree(self.Database1, info) + Tree1.generatePatterns([]) + self._removeFalsePositives() + print("Geo-Referenced Frequent patterns were generated from uncertain databases successfully using GFP algorithm") + self._endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = float() + self._memoryRSS = float() + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss
+ + +
+[docs] + def getMemoryUSS(self): + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self): + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self): + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self): + """ + Storing final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataframe = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a, b]) + dataframe = _ab._pd.DataFrame(data, columns=['Patterns', 'Support']) + return dataframe
+ + +
+[docs] + def save(self, outFile): + """ + Complete set of frequent patterns will be loaded into an output file + + :param outFile: name of the output file + :type outFile: csv file + """ + self._oFile = outFile + with open(self._oFile, 'w+') as writer: + for x, y in self._finalPatterns.items(): + s1 = x + ":" + str(y) + writer.write("%s \n" % s1)
+ + +
+[docs] + def getPatterns(self): + """ + Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs] + def printResults(self): + """ + This function is used to print the results + :return: None + """ + print("Total number of Patterns:", len(self.getPatterns())) + self.save("patterns.txt") + memUSS = self.getMemoryUSS() + print("Total Memory in USS:", memUSS) + memRSS = self.getMemoryRSS() + print("Total Memory in RSS", memRSS) + run = self.getRuntime() + print("Total ExecutionTime in seconds:", run)
+
+ + + +if __name__ == "__main__": + _ap = str() + if len(_ab._sys.argv) == 5 or len(_ab._sys.argv) == 6: + if len(_ab._sys.argv) == 6: + _ap = GFPGrowth(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5]) + if len(_ab._sys.argv) == 5: + _ap = GFPGrowth(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4]) + _ap.mine() + _Patterns = _ap.getPatterns() + print("Total number of Patterns:", len(_Patterns)) + _ap.save(_ab._sys.argv[2]) + _memUSS = _ap.getMemoryUSS() + print("Total Memory in USS:", _memUSS) + _memRSS = _ap.getMemoryRSS() + print("Total Memory in RSS", _memRSS) + _run = _ap.getRuntime() + print("Total ExecutionTime in seconds:", _run) + else: + print("Error! The number of input parameters does not match the total number of parameters provided") +
\ No newline at end of file
diff --git a/sphinx/_build/html/_modules/PAMI/uncertainPeriodicFrequentPattern/basic/UPFPGrowth.html b/sphinx/_build/html/_modules/PAMI/uncertainPeriodicFrequentPattern/basic/UPFPGrowth.html
new file mode 100644
index 000000000..af5ddae9b
--- /dev/null
+++ b/sphinx/_build/html/_modules/PAMI/uncertainPeriodicFrequentPattern/basic/UPFPGrowth.html
@@ -0,0 +1,998 @@
+PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowth — PAMI 2024.04.23 documentation

Source code for PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowth

+# UPFPGrowth is used to discover periodic-frequent patterns in an uncertain temporal database.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#
+#             from PAMI.uncertainPeriodicFrequentPattern.basic import UPFPGrowth as alg
+#
+#             obj = alg.UPFPGrowth(iFile, minSup, maxPer)
+#
+#             obj.mine()
+#
+#             periodicFrequentPatterns = obj.getPatterns()
+#
+#             print("Total number of Periodic Frequent Patterns:", len(periodicFrequentPatterns))
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+__copyright__ = """
+ Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+
+
+import pandas as pd
+from deprecated import deprecated
+from PAMI.uncertainPeriodicFrequentPattern.basic import abstract as _ab
+from typing import List, Dict, Tuple, Union
+
+_minSup = float()
+_maxPer = float()
+_first = int()
+_last = int()
+_lno = int()
+#rank = {}
+#periodic = {}
+
+class _Item:
+    """
+    A class used to represent an item with its probability in a transaction of the dataset
+
+    :Attributes:
+
+        item: int or str
+            Represents the name of the item
+        probability: float
+            Represents the existential probability (likelihood of presence) of an item
+    """
+
+    def __init__(self, item: str, probability: float) -> None:
+        self.item = item
+        self.probability = probability
+
+
+class _Node(object):
+    """
+    A class used to represent the node of frequentPatternTree
+
+    :Attributes:
+
+        item: int
+            storing item of a node
+        probability: float
+            To maintain the expected support of a node
+        parent: node
+            To maintain the parent of every node
+        children: list
+            To maintain the children of node
+        timeStamps: list
+            To maintain the timeStamps of node
+
+    :Methods:
+
+        addChild(itemName)
+            storing the children to their respective parent nodes
+    """
+
+    def __init__(self, item: str, children: Dict) -> None:
+        self.item = item
+        self.probability = 1
+        self.children = children
+        self.parent = None
+        self.timeStamps = []
+
+    def addChild(self, node: '_Node') -> None:
+        """
+        To add the children details to parent node
+
+        :param node: children node
+        :return: updated parent node children
+        """
+        self.children[node.item] = node
+        node.parent = self
+
+
+def _printTree(root) -> None:
+    """
+    To print the details of tree
+
+    :param root: root node of the tree
+    :return: details of tree
+    """
+    for x, y in root.children.items():
+        print(x, y.item, y.probability, y.parent.item, y.timeStamps)
+        _printTree(y)
+
+
+class _Tree(object):
+    """
+    A class used to represent the frequentPatternGrowth tree structure
+
+    :Attributes:
+        root : Node
+            Represents the root node of the tree
+        summaries : dictionary
+            storing the nodes with same item name
+        info : dictionary
+            stores the support of items
+
+    :Methods:
+        addTransactions(transaction)
+            creating transaction as a branch in frequentPatternTree
+        addConditionalTransaction(prefixPaths, supportOfItems)
+            construct the conditional tree for prefix paths
+        conditionalPatterns(Node)
+            generates the conditional patterns from tree for specific node
+        conditionalTransactions(prefixPaths,Support)
+            takes the prefixPath of a node and support at child of the path and extract the frequent items from
+            prefixPaths and generates prefixPaths with items which are frequent
+        remove(Node)
+            removes the node from tree once after generating all the patterns respective to the node
+        generatePatterns(Node)
+            starts from the root node of the tree and mines the frequent patterns
+    """
+
+    def __init__(self) -> None:
+        self.root = _Node(None, {})
+        self.summaries = {}
+        self.info = {}
+
+    def addTransactions(self, transaction: List['_Item'], tid: int) -> None:
+        """
+        Adding transaction into tree
+
+        :param transaction: it represents one transaction in the database
+        :type transaction: list
+        :param tid: the timestamps of the transaction
+        :type tid: list
+        :return: None
+        """
+        currentNode = self.root
+        for i in range(len(transaction)):
+            if transaction[i].item not in currentNode.children:
+                newNode = _Node(transaction[i].item, {})
+                l1 = i - 1
+                temp = []
+                while l1 >= 0:
+                    temp.append(transaction[l1].probability)
+                    l1 -= 1
+                if len(temp) == 0:
+                    newNode.probability = transaction[i].probability
+                else:
+                    newNode.probability = max(temp) * transaction[i].probability
+                currentNode.addChild(newNode)
+                if transaction[i].item in self.summaries:
+                    self.summaries[transaction[i].item].append(newNode)
+                else:
+                    self.summaries[transaction[i].item] = [newNode]
+                currentNode = newNode
+            else:
+                currentNode = currentNode.children[transaction[i].item]
+                l1 = i - 1
+                temp = []
+                while l1 >= 0:
+                    temp.append(transaction[l1].probability)
+                    l1 -= 1
+                if len(temp) == 0:
+                    currentNode.probability += transaction[i].probability
+                else:
+                    currentNode.probability += max(temp) * transaction[i].probability
+        currentNode.timeStamps = currentNode.timeStamps + tid
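+
+    # Illustrative note (not part of the original PAMI source): a hedged worked
+    # example of `addTransactions` with hypothetical items. Inserting
+    # [a(0.9), b(0.6)] with timestamp list [3] creates a node for a with probability
+    # 0.9 and a child node for b with probability max(0.9) * 0.6 = 0.54; the
+    # timestamp 3 is appended only at the last node of the branch (the node for b).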
+
+    def addConditionalTransaction(self, transaction: List[str], ts: List[int], sup: float) -> None:
+        """
+        Constructing conditional tree from prefixPaths
+
+        :param transaction : it represents one transaction in the database
+        :type transaction : list
+        :param ts: timeStamp of a transaction
+        :type ts: list
+        :param sup : support of prefixPath taken at last child of the path
+        :type sup : int
+        :return: None
+        """
+        currentNode = self.root
+        for i in range(len(transaction)):
+            if transaction[i] not in currentNode.children:
+                newNode = _Node(transaction[i], {})
+                newNode.probability = sup
+                currentNode.addChild(newNode)
+                if transaction[i] in self.summaries:
+                    self.summaries[transaction[i]].append(newNode)
+                else:
+                    self.summaries[transaction[i]] = [newNode]
+                currentNode = newNode
+            else:
+                currentNode = currentNode.children[transaction[i]]
+                currentNode.probability += sup
+        currentNode.timeStamps = currentNode.timeStamps + ts
+
+    def getConditionalPatterns(self, alpha: str) -> Tuple[List[List[str]], List[List[int]], List[float], Dict[str, List[float]]]:
+        """
+        Generates all the conditional patterns of respective node.
+
+        :param alpha : it represents an item stored in the tree summaries
+        :type alpha : item
+        :return: tuple
+        """
+
+        finalPatterns = []
+        finalTimeStamps = []
+        sup = []
+        for i in self.summaries[alpha]:
+            set1 = i.timeStamps
+            s = i.probability
+            set2 = []
+            while i.parent.item is not None:
+                set2.append(i.parent.item)
+                i = i.parent
+            if len(set2) > 0:
+                set2.reverse()
+                finalPatterns.append(set2)
+                finalTimeStamps.append(set1)
+                sup.append(s)
+        finalPatterns, finalTimeStamps, support, info = self.conditionalTransactions(finalPatterns, finalTimeStamps,
+                                                                                     sup)
+        return finalPatterns, finalTimeStamps, support, info
+
+    def removeNode(self, nodeValue: str) -> None:
+        """
+        Removing the node from tree
+
+        :param nodeValue : it represents an item stored in the tree summaries
+        :type nodeValue : item
+        :return: None
+        """
+        for i in self.summaries[nodeValue]:
+            i.parent.timeStamps = i.parent.timeStamps + i.timeStamps
+            del i.parent.children[nodeValue]
+
+    def getPeriodAndSupport(self, s: float, timeStamps: List[int]) -> List[float]:
+        """
+        Computes the support and maximal periodicity of a pattern from its timestamps.
+
+        :param s: expected support of the pattern
+        :param timeStamps: timestamps at which the pattern occurs
+        :return: [support, periodicity], or [0, 0] if the periodicity exceeds _maxPer
+        """
+        global _lno, _maxPer
+        timeStamps.sort()
+        cur = 0
+        per = 0
+        sup = s
+        for j in range(len(timeStamps)):
+            per = max(per, timeStamps[j] - cur)
+            if per > _maxPer:
+                return [0, 0]
+            cur = timeStamps[j]
+        per = max(per, _lno - cur)
+        return [sup, per]
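+
+    # Illustrative note (not part of the original PAMI source): a worked example of
+    # `getPeriodAndSupport`, assuming _maxPer = 5 and _lno = 10. For
+    # timeStamps = [1, 3, 7] the gaps checked are 1-0=1, 3-1=2, 7-3=4 and finally
+    # 10-7=3, so the periodicity is max(1, 2, 4, 3) = 4 and [s, 4] is returned; if a
+    # gap between consecutive timestamps exceeds _maxPer, the method returns [0, 0].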
+
+    def conditionalTransactions(self, condPatterns: List[List[str]], condTimeStamps: List[List[int]], support: List[float]) -> Tuple[List[List[str]], List[List[int]], List[float], Dict[str, List[float]]]:
+        """
+        It generates the conditional patterns with frequent items
+
+        :param condPatterns : conditional patterns generated from getConditionalPatterns method for respective node
+        :type condPatterns : list
+        :param condTimeStamps: timeStamps of conditional transactions
+        :type condTimeStamps: list
+        :param support : the support of conditional pattern in tree
+        :type support : list
+        """
+        global _minSup, _maxPer
+        pat = []
+        timeStamps = []
+        sup = []
+        data1 = {}
+        count = {}
+        for i in range(len(condPatterns)):
+            for j in condPatterns[i]:
+                if j in data1:
+                    data1[j] = data1[j] + condTimeStamps[i]
+                    count[j] += support[i]
+                else:
+                    data1[j] = condTimeStamps[i]
+                    count[j] = support[i]
+        updatedDict = {}
+        for m in data1:
+            updatedDict[m] = self.getPeriodAndSupport(count[m], data1[m])
+        updatedDict = {k: v for k, v in updatedDict.items() if v[0] >= _minSup and v[1] <= _maxPer}
+        count = 0
+        for p in condPatterns:
+            p1 = [v for v in p if v in updatedDict]
+            trans = sorted(p1, key=lambda x: (updatedDict.get(x)[0]), reverse=True)
+            if len(trans) > 0:
+                pat.append(trans)
+                timeStamps.append(condTimeStamps[count])
+                sup.append(support[count])
+            count += 1
+        return pat, timeStamps, sup, updatedDict
+
+    def generatePatterns(self, prefix: List[str], periodic: Dict) -> None:
+        """
+        Generates the patterns
+
+        :param prefix : forms the combination of items
+        :type prefix : list
+        :param periodic : dictionary in which the discovered patterns are accumulated
+        :type periodic : dict
+        :return: None
+        """
+
+        global _minSup
+        for i in sorted(self.summaries, key=lambda x: (self.info.get(x)[0])):
+            pattern = prefix[:]
+            pattern.append(i)
+            s = 0
+            for x in self.summaries[i]:
+                s += x.probability
+            periodic[tuple(pattern)] = self.info[i]
+            if s >= _minSup:
+                patterns, timeStamps, support, info = self.getConditionalPatterns(i)
+                conditionalTree = _Tree()
+                conditionalTree.info = info.copy()
+                for pat in range(len(patterns)):
+                    conditionalTree.addConditionalTransaction(patterns[pat], timeStamps[pat], support[pat])
+                if len(patterns) > 0:
+                    conditionalTree.generatePatterns(pattern, periodic)
+            self.removeNode(i)
+
+
+
+[docs] +class UPFPGrowth(_ab._periodicFrequentPatterns): + """ + :Description: UPFPGrowth is used to discover periodic-frequent patterns in an uncertain temporal database. + + :Reference: + Uday Kiran, R., Likhitha, P., Dao, MS., Zettsu, K., Zhang, J. (2021). + Discovering Periodic-Frequent Patterns in Uncertain Temporal Databases. In: + Mantoro, T., Lee, M., Ayu, M.A., Wong, K.W., Hidayanto, A.N. (eds) Neural Information Processing. + ICONIP 2021. Communications in Computer and Information Science, vol 1516. Springer, Cham. + https://doi.org/10.1007/978-3-030-92307-5_83 + + :param iFile: str : + Name of the Input file to mine the complete set of Uncertain Periodic Frequent Patterns + :param oFile: str : + Name of the output file to store the complete set of Uncertain Periodic Frequent patterns + :param minSup: float: + The user-specified minimum support threshold + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator. + :param maxPer: float : + where maxPer represents the maximum periodicity threshold value specified by the user. + + + :Attributes: + iFile : file + Name of the Input file or path of the input file + oFile : file + Name of the output file or path of output file + minSup: int or float or str + The user can specify minSup either in count or proportion of database size. + If the program detects the data type of minSup is integer, then it treats minSup as expressed in count. + Otherwise, it will be treated as float. + Example: minSup=10 will be treated as integer, while minSup=10.0 will be treated as float + maxPer: int or float or str + The user can specify maxPer either in count or proportion of database size. + If the program detects the data type of maxPer is integer, then it treats maxPer as expressed in count. + Otherwise, it will be treated as float. + Example: maxPer=10 will be treated as integer, while maxPer=10.0 will be treated as float + sep: str + This variable is used to distinguish items from one another in a transaction. The default separator is tab space or \t. + However, the users can override the default separator. 
+ memoryUSS: float + To store the total amount of USS memory consumed by the program + memoryRSS: float + To store the total amount of RSS memory consumed by the program + startTime: float + To record the start time of the mining process + endTime: float + To record the completion time of the mining process + Database : list + To store the transactions of a database in list + mapSupport : Dictionary + To maintain the information of items and their frequency + _lno : int + To represent the total number of transactions + tree : class + To represent the Tree class + finalPatterns : dict + To store the complete patterns + + :Methods: + + startMine() + Mining process will start from here + getPatterns() + Complete set of patterns will be retrieved with this function + save(oFile) + Complete set of periodic-frequent patterns will be loaded into an output file + getPatternsAsDataFrame() + Complete set of periodic-frequent patterns will be loaded into a dataframe + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function + creatingItemSets() + Scans the dataset and stores it in a list format + PeriodicFrequentOneItem() + Extracts the one-length periodic-frequent patterns from the database + updateTransaction() + Updates the database by removing aperiodic items and sorts the Database in decreasing order of item support + buildTree() + After updating the Database, remaining items will be added into the tree by setting the root node as null + convert() + To convert the user specified value + removeFalsePositives() + To remove the false positives from the generated patterns + + **Executing the code on terminal**: + -------------------------------------------- + .. code-block:: console + + + Format: + + (.venv) $ python3 UPFPGrowth.py <inputFile> <outputFile> <minSup> <maxPer> + + Example Usage: + + (.venv) $ python3 UPFPGrowth.py sampleTDB.txt patterns.txt 0.3 4 + + + .. note:: minSup and maxPer will be considered in support count or frequency + + + **Importing this algorithm into a python program** + ---------------------------------------------------- + .. code-block:: python + + from PAMI.uncertainPeriodicFrequentPattern.basic import UPFPGrowth as alg + + obj = alg.UPFPGrowth(iFile, minSup, maxPer) + + obj.mine() + + periodicFrequentPatterns = obj.getPatterns() + + print("Total number of Periodic Frequent Patterns:", len(periodicFrequentPatterns)) + + obj.save(oFile) + + Df = obj.getPatternsAsDataFrame() + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + **Credits**: + ------------- + + The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran. 
+ +""" + _rank = {} + _startTime = float() + _endTime = float() + _minSup = float() + _maxPer = float() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _sep = " " + _memoryUSS = float() + _memoryRSS = float() + _Database = [] + _lno = 0 + _periodic = {} + + def _creatingItemSets(self) -> None: + """ + Storing the complete transactions of the database/input file in a database variable + :return: None + """ + self._Database = [] + if isinstance(self._iFile, _ab._pd.DataFrame): + uncertain, data, ts = [], [], [] + if self._iFile.empty: + print("its empty..") + i = self._iFile._columns.values.tolist() + if 'TS' in i: + ts = self._iFile['TS'].tolist() + if 'Transactions' in i: + data = self._iFile['Transactions'].tolist() + if 'uncertain' in i: + uncertain = self._iFile['uncertain'].tolist() + for k in range(len(data)): + tr = [ts[k]] + for j in range(len(k)): + product = _Item(data[k][j], uncertain[k][j]) + tr.append(product) + self._Database.append(tr) + self._lno += 1 + + # print(self.Database) + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line = line.decode("utf-8") + line = line.strip() + line = [i for i in line.split(':')] + temp1 = [i.rstrip() for i in line[0].split(self._sep)] + temp2 = [i.rstrip() for i in line[1].split(self._sep)] + temp1 = [x for x in temp1 if x] + temp2 = [x for x in temp2 if x] + tr = [int(temp1[0])] + for i in range(len(temp1[1:])): + item = temp1[i] + probability = float(temp2[i]) + product = _Item(item, probability) + tr.append(product) + self._lno += 1 + self._Database.append(tr) + else: + try: + count = 0 + with open(self._iFile, 'r') as f: + for line in f: + #count += 1 + line = line.strip() + line = [i for i in line.split(':')] + temp1 = [i.rstrip() for i in line[0].split(self._sep)] + temp2 = [i.rstrip() for i in line[1].split(self._sep)] + temp1 = [x for x in temp1 if x] + temp2 = [x for x in temp2 if x] + tr = [int(temp1[0])] + for i in range(len(temp1[1:])): + item = temp1[i] + probability = float(temp2[i]) + product = _Item(item, probability) + tr.append(product) + self._lno += 1 + self._Database.append(tr) + except IOError: + print("File Not Found") + + def _periodicFrequentOneItem(self) -> Tuple[Dict, List]: + """ + Takes the transactions and calculates the support of each item in the dataset and assign the ranks to the items by decreasing support and returns the frequent items list + :return: Tuple + + """ + mapSupport = {} + for i in self._Database: + n = i[0] + for j in i[1:]: + if j.item not in mapSupport: + mapSupport[j.item] = [round(j.probability, 3), abs(0 - n), n] + else: + mapSupport[j.item][0] += round(j.probability, 3) + mapSupport[j.item][1] = max(mapSupport[j.item][1], abs(n - mapSupport[j.item][2])) + mapSupport[j.item][2] = n + for key in mapSupport: + mapSupport[key][1] = max(mapSupport[key][1], self._lno - mapSupport[key][2]) + mapSupport = {k: [v[0], v[1]] for k, v in mapSupport.items() if v[1] <= self._maxPer and v[0] >= self._minSup} + plist = [k for k, v in sorted(mapSupport.items(), key=lambda x: (x[1][0], x[0]), reverse=True)] + self._rank = dict([(index, item) for (item, index) in enumerate(plist)]) + return mapSupport, plist + + def _check(self, i: List, x: List) -> int: + """ + To check the presence of item or pattern in transaction + + :param x: it represents the pattern + :type x : list + :param i : represents the uncertain transactions + :type i : list + :return: value + :rtype: int + """ + + for m in x: + k = 0 + for n in i: + if m 
== n.item: + k += 1 + if k == 0: + return 0 + return 1 + + def _getPeriodAndSupport(self, s: float, timeStamps: List[int]) -> List[float]: + """ + To calculate periodicity of timeStamps + + :param s: support of a pattern + :param timeStamps: timeStamps of a pattern + :return: periodicity and Support + """ + global __lno, _maxPer + timeStamps.sort() + cur = 0 + per = 0 + sup = s + for j in range(len(timeStamps)): + per = max(per, timeStamps[j] - cur) + if per > _maxPer: + return [0, 0] + cur = timeStamps[j] + per = max(per, _lno - cur) + return [sup, per] + + def _buildTree(self, data: List[List], info: Dict) -> '_Tree': + """ + It takes the transactions and support of each item and construct the main tree with setting root node as null + + :param data: it represents the one transaction in database + :type data: list + :param info: it represents the support of each item + :type info : dictionary + """ + rootNode = _Tree() + rootNode.info = info.copy() + for i in range(len(data)): + set1 = [data[i][0]] + rootNode.addTransactions(data[i][1:], set1) + return rootNode + + def _updateTransactions(self, dict1: Dict) -> List[List]: + """ + Remove the items which are not frequent from transactions and updates the transactions with rank of items + + :param dict1 : frequent items with support + :type dict1 : dictionary + :return: list + """ + list1 = [] + for tr in self._Database: + list2 = [int(tr[0])] + for i in range(1, len(tr)): + if tr[i].item in dict1: + list2.append(tr[i]) + if len(list2) >= 2: + basket = list2[1:] + basket.sort(key=lambda val: self._rank[val.item]) + list2[1:] = basket[0:] + list1.append(list2) + return list1 + + def _convert(self, value: Union[int, float, str]) -> Union[int, float]: + """ + To convert the given user specified value + + :param value: user specified value + :return: converted value + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = float(value) + if type(value) is str: + if '.' in value: + value = float(value) + else: + value = int(value) + + return value + + def _removeFalsePositives(self) -> None: + """ + + :return: Removes the false positive patterns in generated patterns + """ + periods = {} + for i in self._Database: + for x, y in self._periodic.items(): + if len(x) == 1: + periods[x] = y + else: + s = 1 + check = self._check(i[1:], x) + if check == 1: + for j in i[1:]: + if j.item in x: + s *= j.probability + if x in periods: + periods[x][0] += s + else: + periods[x] = [s, y[1]] + for x, y in periods.items(): + if y[0] >= _minSup: + sample = str() + for i in x: + sample = sample + i + "\t" + self._finalPatterns[sample] = y + +
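+
+    # Worked example for _getPeriodAndSupport above (illustrative numbers):
+    # with timeStamps [2, 5, 9] and _lno = 10, the gaps are 2 - 0 = 2,
+    # 5 - 2 = 3, 9 - 5 = 4 and the tail gap 10 - 9 = 1, so the periodicity is
+    # max(2, 3, 4, 1) = 4; whenever a gap exceeds _maxPer, [0, 0] is returned
+    # and the pattern is pruned.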
+[docs]
+    @deprecated("It is recommended to use mine() instead of startMine() for mining process")
+    def startMine(self) -> None:
+        """
+        Main method where the patterns are mined by constructing the tree and removing the false
+        patterns by counting the original support of the patterns.
+
+        :return: None
+        """
+        self.mine()
+ + +
+[docs]
+    def mine(self) -> None:
+        """
+        Main method where the patterns are mined by constructing the tree and removing the false
+        patterns by counting the original support of the patterns.
+
+        :return: None
+        """
+        global _lno, _maxPer, _minSup, _first, _last
+        self._startTime = _ab._time.time()
+        self._creatingItemSets()
+        self._finalPatterns = {}
+        self._minSup = self._convert(self._minSup)
+        self._maxPer = self._convert(self._maxPer)
+        _minSup, _maxPer, _lno = self._minSup, self._maxPer, self._lno
+        mapSupport, plist = self._periodicFrequentOneItem()
+        updatedTrans = self._updateTransactions(mapSupport)
+        info = {k: v for k, v in mapSupport.items()}
+        Tree1 = self._buildTree(updatedTrans, info)
+        self._periodic = {}
+        Tree1.generatePatterns([], self._periodic)
+        self._removeFalsePositives()
+        print("Periodic frequent patterns were generated successfully using UPFP algorithm")
+        self._endTime = _ab._time.time()
+        process = _ab._psutil.Process(_ab._os.getpid())
+        self._memoryUSS = process.memory_full_info().uss
+        self._memoryRSS = process.memory_info().rss
+ + +
+[docs] + def getMemoryUSS(self) -> float: + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self) -> float: + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self) -> float: + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self) -> '_ab._pd.DataFrame': + """ + Storing final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataframe = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a.replace('\t', ' '), b[0], b[1]]) + dataframe = _ab._pd.DataFrame(data, columns=['Patterns', 'Support', 'Periodicity']) + return dataframe
+ + +
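+
+    # Illustrative shape of the dataframe returned above (hypothetical values):
+    #
+    #         Patterns  Support  Periodicity
+    #     0        a b     3.25            4
+    #     1          a     5.10            2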
+[docs]
+    def save(self, outFile: str) -> None:
+        """
+        Complete set of frequent patterns will be loaded in to an output file
+
+        :param outFile: name of the output file
+        :type outFile: csv file
+        :return: None
+        """
+        self._oFile = outFile
+        with open(self._oFile, 'w+') as writer:
+            for x, y in self._finalPatterns.items():
+                s1 = x.strip() + ":" + str(y[0]) + ":" + str(y[1])
+                writer.write("%s \n" % s1)
+ + +
+[docs] + def getPatterns(self) -> Dict[str, List[float]]: + """ + Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs]
+    def printResults(self) -> None:
+        """
+        This function is used to print the results
+        """
+        print("Total number of Uncertain Periodic-Frequent Patterns:", len(self.getPatterns()))
+        print("Total Memory in USS:", self.getMemoryUSS())
+        print("Total Memory in RSS:", self.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_ab._sys.argv) == 5 or len(_ab._sys.argv) == 6:
+        if len(_ab._sys.argv) == 6:
+            _ap = UPFPGrowth(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5])
+        if len(_ab._sys.argv) == 5:
+            _ap = UPFPGrowth(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4])
+        _ap.mine()
+        print("Total number of Uncertain Periodic-Frequent Patterns:", len(_ap.getPatterns()))
+        _ap.save(_ab._sys.argv[2])
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS:", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        print("Error! The number of input parameters does not match the total number of parameters provided")
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/uncertainPeriodicFrequentPattern/basic/UPFPGrowthPlus.html b/sphinx/_build/html/_modules/PAMI/uncertainPeriodicFrequentPattern/basic/UPFPGrowthPlus.html new file mode 100644 index 000000000..557018ffc --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/uncertainPeriodicFrequentPattern/basic/UPFPGrowthPlus.html @@ -0,0 +1,1016 @@ + + + + + + PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowthPlus — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowthPlus

+# UPFPGrowthPlus is used to discover periodic-frequent patterns in an uncertain temporal database.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#
+#             from PAMI.uncertainPeriodicFrequentPattern.basic import UPFPGrowthPlus as alg
+#
+#             obj = alg.UPFPGrowthPlus(iFile, minSup, maxPer)
+#
+#             obj.mine()
+#
+#             periodicFrequentPatterns = obj.getPatterns()
+#
+#             print("Total number of uncertain Periodic Frequent Patterns:", len(periodicFrequentPatterns))
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+__copyright__ = """
+ Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+
+from PAMI.uncertainPeriodicFrequentPattern.basic import abstract as _ab
+import pandas as pd
+from deprecated import deprecated
+
+
+_minSup = float()
+_maxPer = float()
+_lno = int()
+_first = int()
+_last = int()
+
+
+class _Item:
+    """
+    A class used to represent an item together with its existential probability in a transaction of the dataset
+
+    :Attributes:
+
+    item : int or string
+        Represents the name of the item
+    probability : float
+        Represents the existential probability (likelihood of presence) of an item
+    """
+
+    def __init__(self, item, probability):
+        self.item = item
+        self.probability = probability
+
+
+
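+
+# A minimal sketch (hypothetical items and probabilities) of how an uncertain
+# transaction is modelled in this module: the first entry is the timestamp,
+# followed by one _Item per item with its existential probability.
+#
+#     tr = [1, _Item('a', 0.8), _Item('b', 0.6), _Item('c', 0.3)]
+#     for it in tr[1:]:
+#         print(it.item, it.probability)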
+[docs]
+def printTree(root):
+    """
+    To print the tree, showing for every node its item name, probability, parent item, timestamps, and second probability.
+
+    :param root: root node of the tree
+    :return: None; prints one line per node
+    """
+    for x, y in root.children.items():
+        print(x, y.item, y.probability, y.parent.item, y.TimeStamps, y.secondProbability)
+        printTree(y)
+ + + +class _Node(object): + """ + A class used to represent the node of frequentPatternTree + + :Attributes: + + item : int + storing item of a node + probability : int + To maintain the expected support of node + parent : node + To maintain the parent of every node + children : list + To maintain the children of node + + :Methods: + + addChild(itemName) + storing the children to their respective parent nodes + """ + + def __init__(self, item, children): + self.item = item + self.probability = 1 + self.secondProbability = 1 + self.p = 1 + self.children = children + self.parent = None + self.TimeStamps = [] + + def addChild(self, node): + """ + To add children details to parent node + + :param node: children node + :return: update parent node children + """ + self.children[node.item] = node + node.parent = self + + +class _Tree(object): + """ + A class used to represent the frequentPatternGrowth tree structure + + Attributes: + + root: Node + Represents the root node of the tree + summaries: dictionary + storing the nodes with same item name + info: dictionary + stores the support of items + + + :Methods: + + addTransaction(transaction) + creating transaction as a branch in Tree + addConditionalTransaction(prefixPaths, supportOfItems) + construct the conditional tree for prefix paths + getConditionalPatterns(Node) + generates the conditional patterns from tree for specific node + conditionalTransactions(prefixPaths,Support) + takes the prefixPath of a node and support at child of the path and extract the frequent items from prefixPaths and generates prefixPaths with items which are frequent + remove(Node) + removes the node from tree once after generating all the patterns respective to the node + generatePatterns(Node) + starts from the root node of the tree and mines the frequent patterns + + """ + + def __init__(self): + self.root = _Node(None, {}) + self.summaries = {} + self.info = {} + + + def addTransaction(self, transaction, tid): + """ + Adding transaction into tree + + :param transaction : it represents the one transaction in database + :type transaction : list + :param tid : the timestamp of transaction + :type tid : list + """ + currentNode = self.root + k = 0 + for i in range(len(transaction)): + k += 1 + if transaction[i].item not in currentNode.children: + newNode = _Node(transaction[i].item, {}) + newNode.k = k + newNode.secondProbability = transaction[i].probability + l1 = i - 1 + temp = [] + while l1 >= 0: + temp.append(transaction[l1].probability) + l1 -= 1 + if len(temp) == 0: + newNode.probability = round(transaction[i].probability, 2) + else: + newNode.probability = round(max(temp) * transaction[i].probability, 2) + currentNode.addChild(newNode) + if transaction[i].item in self.summaries: + self.summaries[transaction[i].item].append(newNode) + else: + self.summaries[transaction[i].item] = [newNode] + currentNode = newNode + else: + currentNode = currentNode.children[transaction[i].item] + currentNode.secondProbability = max(transaction[i].probability, currentNode.secondProbability) + currentNode.k = k + l1 = i - 1 + temp = [] + while l1 >= 0: + temp.append(transaction[l1].probability) + l1 -= 1 + if len(temp) == 0: + currentNode.probability += round(transaction[i].probability, 2) + else: + nn = max(temp) * transaction[i].probability + currentNode.probability += round(nn, 2) + currentNode.TimeStamps = currentNode.TimeStamps + tid + + def addConditionalPatterns(self, transaction, tid, sup, probability): + """ + Constructing conditional tree from prefixPaths + + :param 
transaction : it represents the one transaction in database + :type transaction : list + :param tid : timestamps of a pattern or transaction in tree + :param tid : list + :param sup : support of prefixPath taken at last child of the path + :type sup : int + :para probability : highest existential probability value among all periodic-frequent items + :type probability : list + """ + currentNode = self.root + k = 0 + for i in range(len(transaction)): + k += 1 + if transaction[i] not in currentNode.children: + newNode = _Node(transaction[i], {}) + newNode.k = k + newNode.probability = sup + newNode.secondProbability = probability + currentNode.addChild(newNode) + if transaction[i] in self.summaries: + self.summaries[transaction[i]].append(newNode) + else: + self.summaries[transaction[i]] = [newNode] + currentNode = newNode + else: + currentNode = currentNode.children[transaction[i]] + currentNode.k = k + currentNode.probability += sup + currentNode.secondProbability = max(probability, currentNode.secondProbability) + currentNode.TimeStamps = currentNode.TimeStamps + tid + + def conditionalPatterns(self, alpha): + """ + Generates all the conditional patterns of respective node + + :param alpha : it represents the Node in tree + :type alpha : Node + """ + finalPatterns = [] + finalSets = [] + sup = [] + prob = [] + for i in self.summaries[alpha]: + set1 = i.TimeStamps + s = i.probability + p = i.secondProbability + set2 = [] + while i.parent.item is not None: + set2.append(i.parent.item) + i = i.parent + if len(set2) > 0: + set2.reverse() + finalPatterns.append(set2) + finalSets.append(set1) + sup.append(s) + prob.append(p) + finalPatterns, finalSets, support, prob, info = self.conditionalTransactions(finalPatterns, finalSets, sup, prob) + return finalPatterns, finalSets, support, prob, info + + def removeNode(self, nodeValue): + """ + Removing the node from tree + + :param nodeValue : it represents the node in tree + :type nodeValue : node + """ + for i in self.summaries[nodeValue]: + i.parent.TimeStamps = i.parent.TimeStamps + i.TimeStamps + del i.parent.children[nodeValue] + + def getPeriodAndSupport(self, support, TimeStamps): + """ + To calculate the periodicity of given timestamps + + :param support: support of pattern + :param TimeStamps: timmeStamps of a pattern + :return: support and period + """ + global _maxPer + global _lno + TimeStamps.sort() + cur = 0 + per = 0 + sup = support + for j in range(len(TimeStamps)): + per = max(per, TimeStamps[j] - cur) + if per > _maxPer: + return [0, 0] + cur = TimeStamps[j] + per = max(per, _lno - cur) + return [sup, per] + + def conditionalTransactions(self, conditionalPatterns, conditionalTimeStamps, support, probability): + """ + It generates the conditional patterns with frequent items + + :param conditionalPatterns : conditional patterns generated from conditionalPatterns() method for respective node + :type conditionalPatterns : list + :param conditionalTimeStamps : timestamps of respective conditional timestamps + :type conditionalTimeStamps : list + :param support : the support of conditional pattern in tree + :type support : list + :para probability : highest existential probability value among all periodic-frequent items + :type probability : list + """ + global _minSup, _maxPer, _lno + pat = [] + TimeStamps = [] + sup = [] + prob = [] + data1 = {} + count = {} + for i in range(len(conditionalPatterns)): + for j in conditionalPatterns[i]: + if j in data1: + data1[j] = data1[j] + conditionalTimeStamps[i] + count[j] += support[i] + else: + 
data1[j] = conditionalTimeStamps[i] + count[j] = support[i] + updatedDict = {} + for m in data1: + updatedDict[m] = self.getPeriodAndSupport(count[m], data1[m]) + updatedDict = {k: v for k, v in updatedDict.items() if v[0] >= _minSup and v[1] <= _maxPer} + count = 0 + for p in conditionalPatterns: + p1 = [v for v in p if v in updatedDict] + trans = sorted(p1, key=lambda x: (updatedDict.get(x)[0]), reverse=True) + if len(trans) > 0: + pat.append(trans) + TimeStamps.append(conditionalTimeStamps[count]) + sup.append(support[count]) + prob.append(probability[count]) + count += 1 + return pat, TimeStamps, sup, prob, updatedDict + + def generatePatterns(self, prefix, periodic): + """ + Generates the patterns + + :param prefix : forms the combination of items + :type prefix : list + :para periodic : occurring at intervals + :type periodic : list + """ + global _minSup + for i in sorted(self.summaries, key=lambda x: (self.info.get(x)[0])): + pattern = prefix[:] + pattern.append(i) + s = 0 + secProb = [] + kk = int() + for x in self.summaries[i]: + if x.k <= 2: + s += x.probability + elif x.k >= 3: + n = x.probability * pow(x.secondProbability, (x.k - 2)) + s += n + periodic[tuple(pattern)] = self.info[i] + periodic[tuple(pattern)] = self.info[i] + if s >= _minSup: + periodic[tuple(pattern)] = self.info[i] + patterns, TimeStamps, support, probability, info = self.conditionalPatterns(i) + conditionalTree = _Tree() + conditionalTree.info = info.copy() + for pat in range(len(patterns)): + conditionalTree.addConditionalPatterns(patterns[pat], TimeStamps[pat], support[pat], probability[pat]) + if len(patterns) > 0: + conditionalTree.generatePatterns(pattern, periodic) + self.removeNode(i) + +
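+
+# Note on the support estimate in _Tree.generatePatterns above (illustrative
+# numbers): a node at depth k = 4 with probability 0.5 and secondProbability
+# 0.9 contributes 0.5 * 0.9 ** (4 - 2) = 0.405 to the over-estimated support;
+# exact supports are recomputed later in _removeFalsePositives.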
+[docs] +class UPFPGrowthPlus(_ab._periodicFrequentPatterns): + """ + :Description: Basic Plus is to discover periodic-frequent patterns in a uncertain temporal database. + + :Reference: + Palla Likhitha, Rage Veena,Rage Uday Kiran, Koji Zettsu, Masashi Toyoda, Philippe Fournier-Viger, (2023). + UPFP-growth++: An Efficient Algorithm to Find Periodic-Frequent Patterns in Uncertain Temporal Databases. + ICONIP 2022. Communications in Computer and Information Science, vol 1792. Springer, Singapore. + https://doi.org/10.1007/978-981-99-1642-9_16 + + :param iFile: str : + Name of the Input file to mine complete set of Uncertain Periodic Frequent Patterns + :param oFile: str : + Name of the output file to store complete set of Uncertain Periodic Frequent patterns + :param minSup: str: + minimum support thresholds were tuned to find the appropriate ranges in the limited memory + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + :param maxper: floot : + where maxPer represents the maximum periodicity threshold value specified by the user. + + + :Attributes: + + iFile: file + Name of the Input file or path of input file + oFile: file + Name of the output file or path of output file + minSup: int or float or str + The user can specify minSup either in count or proportion of database size. + If the program detects the data type of minSup is integer, then it treats minSup is expressed in count. + Otherwise, it will be treated as float. + Example: minSup=10 will be treated as integer, while minSup=10.0 will be treated as float + maxPer: int or float or str + The user can specify maxPer either in count or proportion of database size. + If the program detects the data type of maxPer is integer, then it treats maxPer is expressed in count. + Otherwise, it will be treated as float. + Example: maxPer=10 will be treated as integer, while maxPer=10.0 will be treated as float + sep: str + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space or \t. + However, the users can override their default separator. 
+ memoryUSS: float + To store the total amount of USS memory consumed by the program + memoryRSS: float + To store the total amount of RSS memory consumed by the program + startTime: float + To record the start time of the mining process + endTime: float + To record the completion time of the mining process + Database: list + To store the transactions of a database in list + mapSupport: Dictionary + To maintain the information of item and their frequency + lno: int + To represent the total no of transaction + tree: class + To represents the Tree class + itemSetCount: int + To represents the total no of patterns + finalPatterns: dict + To store the complete patterns + + :Methods: + + startMine() + Mining process will start from here + getPatterns() + Complete set of patterns will be retrieved with this function + savePatterns(oFile) + Complete set of periodic-frequent patterns will be loaded in to a output file + getPatternsAsDataFrame() + Complete set of periodic-frequent patterns will be loaded in to a dataframe + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function + creatingItemSets(fileName) + Scans the dataset and stores in a list format + updateDatabases() + Update the database by removing aperiodic items and sort the Database by item decreased support + buildTree() + After updating the Database, remaining items will be added into the tree by setting root node as null + convert() + to convert the user specified value + PeriodicFrequentOneItems() + To extract the one-length periodic-frequent items + + **Executing the code on terminal**: + -------------------------------------------- + + .. code-block:: console + + + Format: + + (.venv) $ python3 UPFPGrowthPlus.py <inputFile> <outputFile> <minSup> <maxPer> + + Examples Usage: + + (.venv) $ python3 UPFPGrowthPlus.py sampleTDB.txt patterns.txt 0.3 4 + + + .. note:: minSup and maxPer will be considered in support count or frequency + + + **Importing this algorithm into a python program** + ----------------------------------------------------------------- + .. 
code-block:: python + + from PAMI.uncertainPeriodicFrequentPattern import UPFPGrowthPlus as alg + + obj = alg.UPFPGrowthPlus(iFile, minSup, maxPer) + + obj.startMine() + + periodicFrequentPatterns = obj.getPatterns() + + print("Total number of uncertain Periodic Frequent Patterns:", len(periodicFrequentPatterns)) + + obj.save(oFile) + + Df = obj.getPatternsAsDataFrame() + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + + **Credits**: + -------------- + The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran.\n + + """ + _startTime = float() + _endTime = float() + _minSup = float() + _maxPer = float() + _finalPatterns = {} + _iFile = " " + _oFile = " " + _sep = " " + _memoryUSS = float() + _memoryRSS = float() + _Database = [] + _rank = {} + _lno = 0 + _periodic = {} + + def _creatingItemSets(self): + """ + Storing the complete transactions of the database/input file in a database variable + """ + + self._Database = [] + if isinstance(self._iFile, _ab._pd.DataFrame): + uncertain, data, ts = [], [], [] + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'TS' in i: + ts = self._iFile['TS'].tolist() + if 'Transactions' in i: + data = self._iFile['Transactions'].tolist() + if 'uncertain' in i: + uncertain = self._iFile['uncertain'].tolist() + for k in range(len(data)): + tr = [ts[k]] + for j in range(len(k)): + product = _Item(data[k][j], uncertain[k][j]) + tr.append(product) + self._Database.append(tr) + self._lno += 1 + + # print(self.Database) + if isinstance(self._iFile, str): + if _ab._validators.url(self._iFile): + data = _ab._urlopen(self._iFile) + for line in data: + line = line.decode("utf-8") + line = line.strip() + line = [i for i in line.split(':')] + temp1 = [i.rstrip() for i in line[0].split(self._sep)] + temp2 = [i.rstrip() for i in line[1].split(self._sep)] + temp1 = [x for x in temp1 if x] + temp2 = [x for x in temp2 if x] + tr = [int(temp1[0])] + for i in range(len(temp1[1:])): + item = temp1[i] + probability = float(temp2[i]) + product = _Item(item, probability) + tr.append(product) + self._lno += 1 + self._Database.append(tr) + else: + try: + count = 0 + with open(self._iFile, 'r') as f: + for line in f: + line = line.strip() + line = [i for i in line.split(':')] + temp1 = [i.rstrip() for i in line[0].split(self._sep)] + temp2 = [i.rstrip() for i in line[1].split(self._sep)] + temp1 = [x for x in temp1 if x] + temp2 = [x for x in temp2 if x] + tr = [int(temp1[0])] + for i in range(len(temp1[1:])): + item = temp1[i] + probability = float(temp2[i]) + product = _Item(item, probability) + tr.append(product) + self._lno += 1 + self._Database.append(tr) + except IOError: + print("File Not Found") + + def _PeriodicFrequentOneItems(self): + """ + Takes the transactions and calculates the support of each item in the dataset and assign the ranks to the items by decreasing support and returns the frequent items list + """ + global first, last + mapSupport = {} + for i in self._Database: + n = int(i[0]) + for j in i[1:]: + if j.item not in mapSupport: + mapSupport[j.item] = [round(j.probability, 3), abs(0 - n), n] + else: + mapSupport[j.item][0] += round(j.probability, 2) + mapSupport[j.item][1] = max(mapSupport[j.item][1], abs(n - mapSupport[j.item][2])) + mapSupport[j.item][2] = n + for key in mapSupport: + mapSupport[key][1] = 
max(mapSupport[key][1], self._lno - mapSupport[key][2]) + mapSupport = {k: [round(v[0], 2), v[1]] for k, v in mapSupport.items() if + v[1] <= self._maxPer and v[0] >= self._minSup} + plist = [k for k, v in sorted(mapSupport.items(), key=lambda x: (x[1][0], x[0]), reverse=True)] + self._rank = dict([(index, item) for (item, index) in enumerate(plist)]) + return mapSupport, plist + + def _buildTree(self, data, info): + """ + It takes the transactions and support of each item and construct the main tree with setting root node as null + + :param data : it represents the one transaction in database + :type data : list + :param info : it represents the support of each item + :type info : dictionary + """ + rootNode = _Tree() + rootNode.info = info.copy() + for i in range(len(data)): + set1 = [data[i][0]] + rootNode.addTransaction(data[i][1:], set1) + #printTree(rootNode) + #print("....") + return rootNode + + def _updateTransactions(self, dict1): + """ + Remove the items which are not frequent from transactions and updates the transactions with rank of items + + :param dict1 : frequent items with support + :type dict1 : dictionary + """ + list1 = [] + for tr in self._Database: + list2 = [int(tr[0])] + for i in range(1, len(tr)): + if tr[i].item in dict1: + list2.append(tr[i]) + if len(list2) >= 2: + basket = list2[1:] + basket.sort(key=lambda val: self._rank[val.item]) + list2[1:] = basket[0:] + list1.append(list2) + return list1 + + def _Check(self, i, x): + """ + To check the presence of item or pattern in transaction + + :param x: it represents the pattern + :type x : list + :param i : represents the uncertain transactions + :type i : list + """ + for m in x: + k = 0 + for n in i: + if m == n.item: + k += 1 + if k == 0: + return 0 + return 1 + + def _convert(self, value): + """ + + To convert the given user specified value + + :param value: user specified value + :return: converted value + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = float(value) + if type(value) is str: + if '.' in value: + value = float(value) + else: + value = int(value) + return value + + def _removeFalsePositives(self): + """ + To remove false positives in generated patterns + :return: original patterns + """ + periods = {} + for i in self._Database: + for x, y in self._periodic.items(): + if len(x) == 1: + periods[x] = y + else: + s = 1 + check = self._Check(i[1:], x) + if check == 1: + for j in i[1:]: + if j.item in x: + s *= j.probability + if x in periods: + periods[x][0] += s + else: + periods[x] = [s, y[1]] + count = 0 + for x, y in periods.items(): + if y[0] >= _minSup: + count += 1 + sample = str() + for i in x: + sample = sample + i + " " + self._finalPatterns[sample] = y + #print("Total false patterns generated:", len(self._periodic) - count) + +
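+
+    # Worked example for _removeFalsePositives above (hypothetical values):
+    # for the pattern ('a', 'b') and a transaction containing a with
+    # probability 0.8 and b with probability 0.5, the transaction contributes
+    # 0.8 * 0.5 = 0.4 to the pattern's expected support; patterns whose summed
+    # expected support stays below _minSup are discarded as false positives.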
+[docs]
+    @deprecated("It is recommended to use mine() instead of startMine() for mining process")
+    def startMine(self):
+        """
+        Main method where the patterns are mined by constructing the tree and removing the false
+        patterns by counting the original support of the patterns.
+        """
+        self.mine()
+ + +
+[docs] + def mine(self): + """ + Main method where the patterns are mined by constructing tree and remove the false patterns by counting the original support of a patterns + """ + global _minSup, _maxPer, _first, _last, _lno + self._startTime = _ab._time.time() + self._creatingItemSets() + self._minSup = self._convert(self._minSup) + self._maxPer = self._convert(self._maxPer) + self._finalPatterns = {} + _minSup, _maxPer, _lno = self._minSup, self._maxPer, len(self._Database) + mapSupport, plist = self._PeriodicFrequentOneItems() + updatedTrans = self._updateTransactions(mapSupport) + info = {k: v for k, v in mapSupport.items()} + root = self._buildTree(updatedTrans, info) + self._periodic = {} + root.generatePatterns([], self._periodic) + self._removeFalsePositives() + print("Periodic Frequent patterns were generated successfully using UPFP-Growth++ algorithm") + self._endTime = _ab._time.time() + process = _ab._psutil.Process(_ab._os.getpid()) + self._memoryUSS = float() + self._memoryRSS = float() + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss
+ + +
+[docs] + def getMemoryUSS(self): + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function. + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self): + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self): + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self): + """ + Storing final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataframe = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a, b[0], b[1]]) + dataframe = _ab._pd.DataFrame(data, columns=['Patterns', 'Support', 'Periodicity']) + return dataframe
+ + +
+[docs]
+    def save(self, outFile):
+        """
+        Complete set of frequent patterns will be loaded in to an output file
+
+        :param outFile: name of the output file
+        :type outFile: csv file
+        """
+        self._oFile = outFile
+        with open(self._oFile, 'w+') as writer:
+            for x, y in self._finalPatterns.items():
+                s1 = x.strip() + ":" + str(y[0]) + ":" + str(y[1])
+                writer.write("%s \n" % s1)
+ + +
+[docs] + def getPatterns(self): + """ + Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs]
+    def printResults(self):
+        """
+        This function is used to print the results
+        """
+        print("Total number of Uncertain Periodic-Frequent Patterns:", len(self.getPatterns()))
+        print("Total Memory in USS:", self.getMemoryUSS())
+        print("Total Memory in RSS:", self.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", self.getRuntime())
+
+
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_ab._sys.argv) == 5 or len(_ab._sys.argv) == 6:
+        if len(_ab._sys.argv) == 6:
+            _ap = UPFPGrowthPlus(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5])
+        if len(_ab._sys.argv) == 5:
+            _ap = UPFPGrowthPlus(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4])
+        _ap.mine()
+        _Patterns = _ap.getPatterns()
+        print("Total number of Patterns:", len(_Patterns))
+        _ap.save(_ab._sys.argv[2])
+        # print(ap.getPatternsAsDataFrame())
+        _memUSS = _ap.getMemoryUSS()
+        print("Total Memory in USS:", _memUSS)
+        _memRSS = _ap.getMemoryRSS()
+        print("Total Memory in RSS:", _memRSS)
+        _run = _ap.getRuntime()
+        print("Total ExecutionTime in seconds:", _run)
+    else:
+        print("Error! The number of input parameters does not match the total number of parameters provided")
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/weightedFrequentNeighbourhoodPattern/basic/SWFPGrowth.html b/sphinx/_build/html/_modules/PAMI/weightedFrequentNeighbourhoodPattern/basic/SWFPGrowth.html new file mode 100644 index 000000000..ddc2a4b10 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/weightedFrequentNeighbourhoodPattern/basic/SWFPGrowth.html @@ -0,0 +1,922 @@ + + + + + + PAMI.weightedFrequentNeighbourhoodPattern.basic.SWFPGrowth — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.weightedFrequentNeighbourhoodPattern.basic.SWFPGrowth

+# SWFPGrowth is an algorithm to mine the weighted spatial frequent patterns in spatiotemporal databases.
+#
+# **Importing this algorithm into a python program**
+# -------------------------------------------------------
+#
+#             from PAMI.weightedFrequentNeighbourhoodPattern.basic import SWFPGrowth as alg
+#
+#             obj = alg.SWFPGrowth(iFile, nFile, minWS, sep)
+#
+#             obj.mine()
+#
+#             frequentPatterns = obj.getPatterns()
+#
+#             print("Total number of Frequent Patterns:", len(frequentPatterns))
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+
+__copyright__ = """
+ Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+from PAMI.weightedFrequentNeighbourhoodPattern.basic import abstract as _fp
+import pandas as pd
+from deprecated import deprecated
+from typing import List, Dict, Tuple, Union, Iterable
+
+_minWS = float()
+_weights = {}
+_rank = {}
+_neighbourList = {}
+
+_fp._sys.setrecursionlimit(20000)
+
+
+class _WeightedItem:
+    """
+    A class used to represent the weight of the item
+
+    :Attributes:
+
+        item: str
+            storing item of the frequent pattern
+        weight: float
+            stores the weight of the item
+
+    """
+    def __init__(self, item: str, weight: float) -> None:
+        self.item = item
+        self.weight = weight
+
+
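+
+# A minimal sketch (hypothetical items and weights) of how a weighted
+# transaction is modelled once an input line such as "a b c:2 3 1" has been
+# parsed: each item is paired with its weight in a _WeightedItem object.
+#
+#     tr = [_WeightedItem('a', 2), _WeightedItem('b', 3), _WeightedItem('c', 1)]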
+class _Node:
+    """
+    A class used to represent the node of frequentPatternTree
+
+    :Attributes:
+
+        itemId: int
+            storing item of a node
+        freq: int
+            To maintain the support (frequency) of the node
+        weight: float
+            To maintain the accumulated weight of the node
+        parent: node
+            To maintain the parent of node
+        children: list
+            To maintain the children of node
+
+    :Methods:
+
+        addChild(node)
+            Updates the nodes children list and parent for the given node
+
+    """
+
+    def __init__(self, item: str, children: Dict[str, '_Node']) -> None:
+        self.itemId = item
+        self.freq = 1
+        self.weight = 0
+        self.parent = None
+        self.children = children
+
+    def addChild(self, node: '_Node') -> None:
+        """
+        Adding the given node as a child of this node and setting its parent link
+
+        :param node: child node
+        :type node: Node
+        :return: None
+
+        """
+        self.children[node.itemId] = node
+        node.parent = self
+
+
+class _Tree:
+    """
+    A class used to represent the frequentPatternGrowth tree structure
+
+    :Attributes:
+
+        root : Node
+            The first node of the tree set to Null.
+        summaries : dictionary
+            Stores the nodes itemId which shares same itemId
+        info : dictionary
+            frequency of items in the transactions
+
+    :Methods:
+
+        addTransaction(transaction, freq)
+            adding items of  transactions into the tree as nodes and freq is the count of nodes
+        getFinalConditionalPatterns(node)
+            getting the conditional patterns from fp-tree for a node
+        getConditionalPatterns(patterns, frequencies)
+            sort the patterns by removing the items with lower minWS
+        generatePatterns(prefix)
+            generating the patterns from fp-tree
+    """
+
+    def __init__(self) -> None:
+        self.root = _Node(None, {})
+        self.summaries = {}
+        self.info = {}
+
+    def addTransaction(self, transaction: List[_WeightedItem], count: int) -> None:
+        """
+        Adding transaction into tree
+
+        :param transaction: it represents the one transaction in database
+        :type transaction: list
+        :param count: frequency of item
+        :type count: int
+        :return: None
+        """
+
+        # Inserts the transaction into the tree, updating node frequencies and weights
+        global _neighbourList, _rank
+        currentNode = self.root
+        for i in range(len(transaction)):
+            wei = 0
+            l1 = i
+            while l1 >= 0:
+                wei += transaction[l1].weight
+                l1 -= 1
+            if transaction[i].item not in currentNode.children:
+                newNode = _Node(transaction[i].item, {})
+                newNode.freq = count
+                newNode.weight = wei
+                currentNode.addChild(newNode)
+                if _rank[transaction[i].item] in self.summaries:
+                    self.summaries[_rank[transaction[i].item]].append(newNode)
+                else:
+                    self.summaries[_rank[transaction[i].item]] = [newNode]
+                currentNode = newNode
+            else:
+                currentNode = currentNode.children[transaction[i].item]
+                currentNode.freq += count
+                currentNode.weight += wei
+
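+
+    # Illustration of the weight accumulation above (hypothetical weights):
+    # for a transaction [a:2, b:3, c:1], the node created for c receives
+    # wei = 2 + 3 + 1 = 6, i.e. the sum of the weights along its prefix path.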
+    def addConditionalPattern(self, transaction: List[_WeightedItem], count: int) -> None:
+        """
+        Adding transaction into tree
+
+        :param transaction: it represents the one transaction in database
+        :type transaction: list
+        :param count: frequency of item
+        :type count: int
+        :return: None
+        """
+        # Inserts the conditional pattern into the tree, updating node frequencies and weights
+        global _neighbourList, _rank
+        currentNode = self.root
+        for i in range(len(transaction)):
+            wei = 0
+            l1 = i
+            while l1 >= 0:
+                wei += transaction[l1].weight
+                l1 -= 1
+            if transaction[i].itemId not in currentNode.children:
+                newNode = _Node(transaction[i].itemId, {})
+                newNode.freq = count
+                newNode.weight = wei
+                currentNode.addChild(newNode)
+                if _rank[transaction[i].itemId] in self.summaries:
+                    self.summaries[_rank[transaction[i].itemId]].append(newNode)
+                else:
+                    self.summaries[_rank[transaction[i].itemId]] = [newNode]
+                currentNode = newNode
+            else:
+                currentNode = currentNode.children[transaction[i].itemId]
+                currentNode.freq += count
+                currentNode.weight += wei
+
+    def printTree(self, root: _Node) -> None:
+        """
+        To print the details of tree
+
+        :param root: root node of the tree
+        :return: details of tree
+        """
+        if len(root.children) == 0:
+            return
+        else:
+            for x, y in root.children.items():
+                #print(y.itemId, y.parent.itemId, y.freq, y.weight)
+                self.printTree(y)
+
+
+    def getFinalConditionalPatterns(self, alpha: int) -> Tuple[List[List[_Node]], List[float], Dict[int, float]]:
+        """
+        Generates the conditional patterns for a node
+
+        :param alpha: node to generate conditional patterns
+        :return: returns conditional patterns, frequency of each item in conditional patterns
+
+        """
+        finalPatterns = []
+        finalFreq = []
+        global _neighbourList
+        for i in self.summaries[alpha]:
+            set1 = i.weight
+            set2 = []
+            while i.parent.itemId is not None:
+                if i.parent.itemId in _neighbourList[i.itemId]:
+                    set2.append(i.parent)
+                i = i.parent
+            if len(set2) > 0:
+                set2.reverse()
+                finalPatterns.append(set2)
+                finalFreq.append(set1)
+        finalPatterns, finalFreq, info = self.getConditionalTransactions(finalPatterns, finalFreq)
+        return finalPatterns, finalFreq, info
+
+    @staticmethod
+    def getConditionalTransactions(ConditionalPatterns: List[List[_Node]], conditionalFreq: List[float]) -> Tuple[List[List[_Node]], List[float], Dict[int, float]]:
+        """
+        To calculate the frequency of items in conditional patterns and sorting the patterns
+
+        :param ConditionalPatterns: paths of a node
+        :param conditionalFreq: frequency of each item in the path
+        :return: conditional patterns and frequency of each item in transactions
+        """
+        global _rank
+        pat = []
+        freq = []
+        data1 = {}
+        for i in range(len(ConditionalPatterns)):
+            for j in ConditionalPatterns[i]:
+                if j.itemId in data1:
+                    data1[j.itemId] += conditionalFreq[i]
+                else:
+                    data1[j.itemId] = conditionalFreq[i]
+        up_dict = {k: v for k, v in data1.items() if v >= _minWS}
+        count = 0
+        for p in ConditionalPatterns:
+            p1 = [v for v in p if v.itemId in up_dict]
+            trans = sorted(p1, key=lambda x: up_dict.get(x.itemId), reverse=True)
+            if len(trans) > 0:
+                pat.append(trans)
+                freq.append(conditionalFreq[count])
+            count += 1
+        up_dict = {_rank[k]: v for k, v in up_dict.items()}
+        return pat, freq, up_dict
+
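+
+    # Worked example for getConditionalTransactions above (hypothetical
+    # values): if data1 = {'a': 12, 'b': 4} and _minWS = 10, then 'b' is
+    # dropped, each prefix path keeps only its surviving items, and the
+    # surviving paths are re-sorted by accumulated weight.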
+    def generatePatterns(self, prefix: List[int]) -> Iterable[Tuple[List[int], float]]:
+        """
+        To generate the frequent patterns
+
+        :param prefix: an empty list
+        :return: Frequent patterns that are extracted from fp-tree
+
+        """
+        global _minWS
+        for i in sorted(self.summaries, key=lambda x: (self.info.get(x))):
+            pattern = prefix[:]
+            pattern.append(i)
+            yield pattern, self.info[i]
+            patterns, freq, info = self.getFinalConditionalPatterns(i)
+            conditionalTree = _Tree()
+            conditionalTree.info = info.copy()
+            for pat in range(len(patterns)):
+                conditionalTree.addConditionalPattern(patterns[pat], freq[pat])
+            if len(patterns) > 0:
+                for q in conditionalTree.generatePatterns(pattern):
+                    yield q
+
+
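+
+# _Tree.generatePatterns is a generator, so patterns can be consumed lazily
+# instead of being materialised all at once; SWFPGrowth later maps the emitted
+# rank-encoded patterns back to item names. A minimal usage sketch, assuming
+# `tree` was built with a matching info dictionary:
+#
+#     for pattern, weight in tree.generatePatterns([]):
+#         print(pattern, weight)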
+
+[docs] +class SWFPGrowth(_fp._weightedFrequentSpatialPatterns): + """ + :Description: SWFPGrowth is an algorithm to mine the weighted spatial frequent patterns in spatiotemporal databases. + + :Reference: + R. Uday Kiran, P. P. C. Reddy, K. Zettsu, M. Toyoda, M. Kitsuregawa and P. Krishna Reddy, + "Discovering Spatial Weighted Frequent Itemsets in Spatiotemporal Databases," 2019 International + Conference on Data Mining Workshops (ICDMW), 2019, pp. 987-996, doi: 10.1109/ICDMW.2019.00143. + + :param iFile: str : + Name of the Input file to mine complete set of weighted Frequent Neighbourhood Patterns. + :param oFile: str : + Name of the output file to store complete set of weighted Frequent Neighbourhood Patterns. + :param minSup: int or str or float: + minimum support thresholds were tuned to find the appropriate ranges in the limited memory + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default seperator is tab space. However, the users can override their default separator. + :param maxper: floot : + where maxPer represents the maximum periodicity threshold value specified by the user. + + + :Attributes: + + iFile : file + Input file name or path of the input file + minWS: float or int or str + The user can specify minWS either in count or proportion of database size. + If the program detects the data type of minWS is integer, then it treats minWS is expressed in count. + Otherwise, it will be treated as float. + Example: minWS=10 will be treated as integer, while minWS=10.0 will be treated as float + minWeight: float or int or str + The user can specify minWeight either in count or proportion of database size. + If the program detects the data type of minWeight is integer, then it treats minWeight is expressed in count. + Otherwise, it will be treated as float. + Example: minWeight=10 will be treated as integer, while minWeight=10.0 will be treated as float + sep : str + This variable is used to distinguish items from one another in a transaction. The default separator is tab space or \t. + However, the users can override their default separator. 
+ oFile : file + Name of the output file or the path of the output file + startTime:float + To record the start time of the mining process + endTime:float + To record the completion time of the mining process + memoryUSS : float + To store the total amount of USS memory consumed by the program + memoryRSS : float + To store the total amount of RSS memory consumed by the program + Database : list + To store the transactions of a database in list + mapSupport : Dictionary + To maintain the information of item and their frequency + lno : int + it represents the total no of transactions + tree : class + it represents the Tree class + finalPatterns : dict + it represents to store the patterns + + :Methods : + + startMine() + Mining process will start from here + getPatterns() + Complete set of patterns will be retrieved with this function + save(oFile) + Complete set of frequent patterns will be loaded in to an output file + getPatternsAsDataFrame() + Complete set of frequent patterns will be loaded in to a dataframe + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function + creatingItemSets() + Scans the dataset or dataframes and stores in list format + frequentOneItem() + Extracts the one-frequent patterns from transactions + + **Methods to execute code on terminal** + ------------------------------------------- + .. code-block:: console + + + Format: + + (.venv) $ python3 SWFPGrowth.py <inputFile> <weightFile> <outputFile> <minSup> <minWeight> + + Example usage : + + (.venv) $ python3 SWFPGrowth.py sampleDB.txt weightFile.txt patterns.txt 10 2 + + + .. note:: minSup will be considered in support count or frequency + + + **Importing this algorithm into a python program** + ---------------------------------------------------- + .. code-block:: python + + from PAMI.weightFrequentNeighbourhoodPattern.basic import SWFPGrowth as alg + + obj = alg.SWFPGrowth(iFile, wFile, nFile, minSup, minWeight, seperator) + + obj.startMine() + + frequentPatterns = obj.getPatterns() + + print("Total number of Frequent Patterns:", len(frequentPatterns)) + + obj.save(oFile) + + Df = obj.getPatternsAsDataFrame() + + memUSS = obj.getmemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + **Credits:** + -------------- + The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran. 
+ + """ + + __startTime = float() + __endTime = float() + _Weights = {} + _minWS = str() + __finalPatterns = {} + _neighbourList = {} + _iFile = " " + _oFile = " " + _sep = " " + __memoryUSS = float() + __memoryRSS = float() + __Database = [] + __mapSupport = {} + __lno = 0 + __tree = _Tree() + __rank = {} + __rankDup = {} + + def __init__(self, iFile: Union[str, _fp._pd.DataFrame], nFile: Union[str, _fp._pd.DataFrame], minWS: Union[int, float, str], sep='\t') -> None: + super().__init__(iFile, nFile, minWS, sep) + + def __creatingItemSets(self) -> None: + """ + Storing the complete transactions of the database/input file in a database variable + :return: None + """ + self._Database = [] + if isinstance(self._iFile, _fp._pd.DataFrame): + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'Transactions' in i: + self._Database = self._iFile['Transactions'].tolist() + # print(self.Database) + if isinstance(self._iFile, str): + if _fp._validators.url(self._iFile): + data = _fp._urlopen(self._iFile) + for line in data: + line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line = line.strip() + line = line.split(':') + temp1 = [i.rstrip() for i in line[0].split(self._sep)] + temp2 = [int(i.strip()) for i in line[1].split(self._sep)] + tr = [] + for i in range(len(temp1)): + we = _WeightedItem(temp1[i], temp2[i]) + tr.append(we) + self._Database.append(tr) + except IOError: + print("File Not Found") + quit() + + def _scanNeighbours(self) -> None: + self._neighbourList = {} + if isinstance(self._nFile, _fp._pd.DataFrame): + data, items = [], [] + if self._nFile.empty: + print("its empty..") + i = self._nFile.columns.values.tolist() + if 'item' in i: + items = self._nFile['items'].tolist() + if 'Neighbours' in i: + data = self._nFile['Neighbours'].tolist() + for k in range(len(items)): + self._neighbourList[items[k][0]] = data[k] + # print(self.Database) + if isinstance(self._nFile, str): + if _fp._validators.url(self._nFile): + data = _fp._urlopen(self._nFile) + for line in data: + line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._neighbourList[temp[0]] = temp[1:] + else: + try: + with open(self._nFile, 'r', encoding='utf-8') as f: + for line in f: + line.strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._neighbourList[temp[0]] = temp[1:] + except IOError: + print("File Not Found2") + quit() + + def __convert(self, value: Union[int, float, str]) -> Union[int, float]: + """ + to convert the type of user specified minWS value + + :param value: user specified minWS value + :return: converted type + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self._Database) * value) + if type(value) is str: + if '.' 
in value: + value = float(value) + value = (len(self._Database) * value) + else: + value = int(value) + return value + + def __frequentOneItem(self) -> List[str]: + """ + Generating One frequent items sets + :return: None + """ + global _maxWeight + self._mapSupport = {} + for tr in self._Database: + for i in tr: + nn = [j for j in tr if j.item in self._neighbourList[i.item]] + if i.item not in self._mapSupport: + self._mapSupport[i.item] = i.weight + else: + self._mapSupport[i.item] += i.weight + for k in nn: + self._mapSupport[i.item] += k.weight + self._mapSupport = {k: v for k, v in self._mapSupport.items() if v >= self._minWS} + genList = [k for k, v in sorted(self._mapSupport.items(), key=lambda x: x[1], reverse=True)] + self.__rank = dict([(index, item) for (item, index) in enumerate(genList)]) + return genList + + def __updateTransactions(self, itemSet: List[str]) -> List[List[_WeightedItem]]: + """ + Updates the items in transactions with rank of items according to their support + :Example: oneLength = {'a':7, 'b': 5, 'c':'4', 'd':3} + rank = {'a':0, 'b':1, 'c':2, 'd':3} + :param itemSet: list of one-frequent items + :return: list + """ + list1 = [] + for tr in self._Database: + list2 = [] + for i in range(len(tr)): + if tr[i].item in itemSet: + list2.append(tr[i]) + if len(list2) >= 1: + basket = list2 + basket.sort(key=lambda val: self.__rank[val.item]) + list1.append(basket) + return list1 + + @staticmethod + def __buildTree(transactions: List[List[_WeightedItem]], info: Dict[int, float]) -> _Tree: + """ + Builds the tree with updated transactions + + :param transactions: updated transactions + :param info: support details of each item in transactions. + :return: transactions compressed in fp-tree. + """ + rootNode = _Tree() + rootNode.info = info.copy() + for i in range(len(transactions)): + rootNode.addTransaction(transactions[i], 1) + return rootNode + + def __savePeriodic(self, itemSet: List[str]) -> str: + """ + The duplication items and their ranks + + :param itemSet: frequent itemSet that generated + :return: patterns with original item names. + + """ + temp = str() + for i in itemSet: + temp = temp + self.__rankDup[i] + "\t" + return temp + +
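+
+    # __convert example (illustrative): with a database of 100 transactions,
+    # minWS = 0.1 (float) is scaled to 100 * 0.1 = 10.0, whereas minWS = 10
+    # (int) is used directly as an absolute weighted-support threshold.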
+[docs] + @deprecated("It is recommended to use mine() instead of startMine() for mining process") + def startMine(self) -> None: + """ + main program to start the operation + :return : None + + """ + self.mine()
+ + +
+[docs] + def mine(self) -> None: + """ + main program to start the operation + :return : None + + """ + global _minWS, _neighbourList, _rank + self.__startTime = _fp._time.time() + if self._iFile is None: + raise Exception("Please enter the file path or file name:") + if self._minWS is None: + raise Exception("Please enter the Minimum Support") + self.__creatingItemSets() + self._scanNeighbours() + self._minWS = self.__convert(self._minWS) + _minWS = self._minWS + itemSet = self.__frequentOneItem() + updatedTransactions = self.__updateTransactions(itemSet) + info = {self.__rank[k]: v for k, v in self._mapSupport.items()} + _rank = self.__rank + for x, y in self.__rank.items(): + self.__rankDup[y] = x + _neighbourList = self._neighbourList + #self._neighbourList = {k:v for k, v in self._neighbourList.items() if k in self._mapSupport.keys()} + # for x, y in self._neighbourList.items(): + # xx = [self.__rank[i] for i in y if i in self._mapSupport.keys()] + # _neighbourList[self.__rank[x]] = xx + # print(_neighbourList) + __Tree = self.__buildTree(updatedTransactions, info) + patterns = __Tree.generatePatterns([]) + self.__finalPatterns = {} + for k in patterns: + s = self.__savePeriodic(k[0]) + self.__finalPatterns[str(s)] = k[1] + print("Weighted Frequent patterns were generated successfully using SWFPGrowth algorithm") + self.__endTime = _fp._time.time() + self.__memoryUSS = float() + self.__memoryRSS = float() + process = _fp._psutil.Process(_fp._os.getpid()) + self.__memoryUSS = process.memory_full_info().uss + self.__memoryRSS = process.memory_info().rss
+ + +
+[docs] + def getMemoryUSS(self) -> float: + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self.__memoryUSS
+ + +
+[docs] + def getMemoryRSS(self) -> float: + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self.__memoryRSS
+ + +
+[docs] + def getRuntime(self) -> float: + """ + Calculating the total amount of runtime taken by the mining process + + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self.__endTime - self.__startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self) -> _fp._pd.DataFrame: + """ + Storing final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataframe = {} + data = [] + for a, b in self.__finalPatterns.items(): + data.append([a.replace('\t', ' '), b]) + dataframe = _fp._pd.DataFrame(data, columns=['Patterns', 'Support']) + return dataframe
+ + +
+[docs] + def save(self, outFile: str) -> None: + """ + Complete set of frequent patterns will be loaded in to an output file + + :param outFile: name of the output file + :type outFile: csv file + :return: None + """ + self._oFile = outFile + writer = open(self._oFile, 'w+') + for x, y in self.__finalPatterns.items(): + s1 = x.strip() + ":" + str(y) + writer.write("%s \n" % s1)
+ + +
+[docs] + def getPatterns(self) -> Dict[str, float]: + """ + Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + :rtype: dict + """ + return self.__finalPatterns
+ + +
+[docs] + def printResults(self) -> None: + """ + This function is used to print the results + :return: None + """ + print("Total number of Weighted Spatial Frequent Patterns:", len(self.getPatterns())) + print("Total Memory in USS:", self.getMemoryUSS()) + print("Total Memory in RSS:", self.getMemoryRSS()) + print("Total ExecutionTime in seconds:", self.getRuntime())
+
+ + + +if __name__ == "__main__": + _ap = str() + if len(_fp._sys.argv) == 7 or len(_fp._sys.argv) == 8: + if len(_fp._sys.argv) == 8: + _ap = SWFPGrowth(_fp._sys.argv[1], _fp._sys.argv[3], _fp._sys.argv[4], _fp._sys.argv[5], _fp._sys.argv[6], + _fp._sys.argv[7]) + if len(_fp._sys.argv) == 7: + _ap = SWFPGrowth(_fp._sys.argv[1], _fp._sys.argv[3], _fp._sys.argv[4], _fp._sys.argv[5], _fp._sys.argv[6]) + _ap.mine() + print("Total number of Weighted Spatial Frequent Patterns:", len(_ap.getPatterns())) + _ap.save(_fp._sys.argv[2]) + print("Total Memory in USS:", _ap.getMemoryUSS()) + print("Total Memory in RSS:", _ap.getMemoryRSS()) + print("Total ExecutionTime in seconds:", _ap.getRuntime()) + else: + _ap = SWFPGrowth('sample.txt', 'neighbourSample.txt', 150, ' ') + _ap.mine() + print("Total number of Weighted Spatial Frequent Patterns:", len(_ap.getPatterns())) + _ap.save('output.txt') + print("Total Memory in USS:", _ap.getMemoryUSS()) + print("Total Memory in RSS:", _ap.getMemoryRSS()) + print("Total ExecutionTime in seconds:", _ap.getRuntime()) + print("Error! The number of input parameters does not match the total number of parameters provided") +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/weightedFrequentPattern/basic/WFIM.html b/sphinx/_build/html/_modules/PAMI/weightedFrequentPattern/basic/WFIM.html new file mode 100644 index 000000000..6721ba386 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/weightedFrequentPattern/basic/WFIM.html @@ -0,0 +1,825 @@ + + + + + + PAMI.weightedFrequentPattern.basic.WFIM — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.weightedFrequentPattern.basic.WFIM

+# WFMiner is one of the fundamental algorithms to discover weighted frequent patterns in a transactional database.
+# It stores the database in a compressed fp-tree, decreasing the memory usage, and extracts the
+# patterns from the tree. It employs the downward closure property to reduce the search space effectively.
+#
+# **Importing this algorithm into a python program**
+# ----------------------------------------------------------
+#
+#
+#             from PAMI.weightedFrequentPattern.basic import WFIM as alg
+#
+#             obj = alg.WFIM(iFile, wFile, minSup, minWeight)
+#
+#             obj.startMine()
+#
+#             frequentPatterns = obj.getPatterns()
+#
+#             print("Total number of Frequent Patterns:", len(frequentPatterns))
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+__copyright__ = """
+ Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+from PAMI.weightedFrequentPattern.basic import abstract as _fp
+from typing import List, Dict, Tuple, Union, Generator
+import pandas as pd
+from deprecated import deprecated
+
+
+_minSup = str()
+_minWeight = int()
+_miniWeight = int()
+_maxWeight = int()
+_weights = {}
+_fp._sys.setrecursionlimit(20000)
+
+
+class _Node:
+    """
+    A class used to represent the node of frequentPatternTree
+
+    :Attributes:
+
+        itemId: int
+            storing item of a node
+        freq: int
+            To maintain the support of node
+        parent: node
+            To maintain the parent of node
+        children: dict
+            To maintain the children of node
+
+    :Methods:
+
+        addChild(node)
+            Updates the nodes children list and parent for the given node
+    """
+
+    def __init__(self, item: str, children: dict) -> None:
+        self.itemId = item
+        self.freq = 1
+        self.parent = None
+        self.children = children
+
+    def addChild(self, node: '_Node') -> None:
+        """
+        Adding a child node to the tree and updating its parent link
+
+        :param node: child node to be attached
+        :type node: Node
+        :return: None
+        """
+        self.children[node.itemId] = node
+        node.parent = self
+
+
+class _Tree:
+    """
+    A class used to represent the frequentPatternGrowth tree structure
+
+    :Attributes:
+
+        root : Node
+            The first node of the tree set to Null.
+        summaries : dictionary
+            Stores the nodes that share the same itemId
+        info : dictionary
+            frequency of items in the transactions
+
+    :Methods:
+
+        addTransaction(transaction, freq)
+            adds the items of a transaction into the tree as nodes; freq is the occurrence count of the transaction
+        getFinalConditionalPatterns(node)
+            getting the conditional patterns from fp-tree for a node
+        getConditionalPatterns(patterns, frequencies)
+            sorts the patterns by removing the items whose support is below minSup
+        generatePatterns(prefix)
+            generating the patterns from fp-tree
+    """
+
+    def __init__(self) -> None:
+        self.root = _Node(None, {})
+        self.summaries = {}
+        self.info = {}
+
+    def addTransaction(self, transaction: List[str], count: int) -> None:
+        """
+        Adding transaction into tree
+
+        :param transaction: it represents the one transaction in database
+        :type transaction: list
+        :param count: frequency of item
+        :type count: int
+        :return: None
+        """
+        # This method takes a transaction as input and adds it as a path in the tree
+        currentNode = self.root
+        for i in range(len(transaction)):
+            if transaction[i] not in currentNode.children:
+                newNode = _Node(transaction[i], {})
+                newNode.freq = count
+                currentNode.addChild(newNode)
+                if transaction[i] in self.summaries:
+                    self.summaries[transaction[i]].append(newNode)
+                else:
+                    self.summaries[transaction[i]] = [newNode]
+                currentNode = newNode
+            else:
+                currentNode = currentNode.children[transaction[i]]
+                currentNode.freq += count
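+
+    # Illustrative note (hypothetical input): inserting the ranked transactions
+    # [a, b] and then [a, c] with count 1 each produces one shared node for a
+    # with freq 2 and two children b and c with freq 1; shared prefixes are
+    # what lets the fp-tree compress the database.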
+
+    def getFinalConditionalPatterns(self, alpha: str) -> Tuple[List[List[str]], List[int], Dict[str, int]]:
+        """
+        Generates the conditional patterns for a node
+
+        :param alpha: node to generate conditional patterns
+        :return: the conditional patterns, their frequencies, and the support info of the surviving items
+        """
+        finalPatterns = []
+        finalFreq = []
+        for i in self.summaries[alpha]:
+            set1 = i.freq
+            set2 = []
+            while i.parent.itemId is not None:
+                set2.append(i.parent.itemId)
+                i = i.parent
+            if len(set2) > 0:
+                set2.reverse()
+                finalPatterns.append(set2)
+                finalFreq.append(set1)
+        finalPatterns, finalFreq, info = self.getConditionalTransactions(finalPatterns, finalFreq)
+        return finalPatterns, finalFreq, info
+
+    @staticmethod
+    def getConditionalTransactions(ConditionalPatterns: List[List[str]], conditionalFreq: List[int]) -> Tuple[List[List[str]], List[int], Dict[str, int]]:
+        """
+        To calculate the frequency of items in conditional patterns and sorting the patterns
+
+        :param ConditionalPatterns: paths of a node
+        :param conditionalFreq: frequency of each item in the path
+        :return: conditional patterns and frequency of each item in transactions
+        """
+        global _minSup, _miniWeight
+        pat = []
+        freq = []
+        data1 = {}
+        for i in range(len(ConditionalPatterns)):
+            for j in ConditionalPatterns[i]:
+                if j in data1:
+                    data1[j] += conditionalFreq[i]
+                else:
+                    data1[j] = conditionalFreq[i]
+        up_dict = {k: v for k, v in data1.items() if v >= _minSup and v * _miniWeight > _minSup}
+        count = 0
+        for p in ConditionalPatterns:
+            p1 = [v for v in p if v in up_dict]
+            trans = sorted(p1, key=lambda x: (up_dict.get(x), -x), reverse=True)
+            if len(trans) > 0:
+                pat.append(trans)
+                freq.append(conditionalFreq[count])
+            count += 1
+        return pat, freq, up_dict
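+
+    # Illustrative note (hypothetical thresholds): with _minSup = 4 and
+    # _miniWeight = 2, an item whose summed conditional frequency is 6 is kept
+    # (6 >= 4 and 6 * 2 = 12 > 4), while an item with frequency 3 is pruned
+    # from every conditional path before the conditional tree is built.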
+
+    def generatePatterns(self, prefix: List[str]) -> Generator[Tuple[List[str], int], None, None]:
+        """
+        To generate the frequent patterns
+
+        :param prefix: an empty list
+        :return: Frequent patterns that are extracted from fp-tree
+        """
+        global _miniWeight, _maxWeight, _minWeight, _minSup
+        for i in sorted(self.summaries, key=lambda x: (self.info.get(x), -x)):
+            pattern = prefix[:]
+            pattern.append(i)
+            yield pattern, self.info[i]
+            patterns, freq, info = self.getFinalConditionalPatterns(i)
+            conditionalTree = _Tree()
+            conditionalTree.info = info.copy()
+            for pat in range(len(patterns)):
+                conditionalTree.addTransaction(patterns[pat], freq[pat])
+            if len(patterns) > 0:
+                for q in conditionalTree.generatePatterns(pattern):
+                    yield q
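+
+    # Illustrative note: generatePatterns([]) visits items in increasing order
+    # of support, yields each extended prefix together with its support, and
+    # recurses on the conditional tree built from that item's prefix paths:
+    #
+    #     for pattern, support in tree.generatePatterns([]):
+    #         ...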
+
+
+
+[docs] +class WFIM(_fp._weightedFrequentPatterns): + """ + :Description: + * WFMiner is one of the fundamental algorithms to discover weighted frequent patterns in a transactional database. + * It stores the database in a compressed fp-tree, decreasing the memory usage, and extracts the patterns from the tree. It employs the downward closure property to reduce the search space effectively. + + :Reference : + U. Yun and J. J. Leggett, “Wfim: weighted frequent itemset mining with a weight range and a minimum weight,” + in Proceedings of the 2005 SIAM International Conference on Data Mining. SIAM, 2005, pp. 636–640. + https://epubs.siam.org/doi/pdf/10.1137/1.9781611972757.76 + + :param iFile: str : + Name of the Input file to mine complete set of weighted Frequent Patterns. + :param oFile: str : + Name of the output file to store complete set of weighted Frequent Patterns. + :param minSup: str or int or float: + The user-specified minimum support, expressed either in count or as a proportion of database size. + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator. + + + :Attributes : + + iFile : file + Input file name or path of the input file + minSup: float or int or str + The user can specify minSup either in count or proportion of database size. + If the program detects the data type of minSup is integer, then it treats minSup as expressed in count. + Otherwise, it will be treated as float. + Example: minSup=10 will be treated as integer, while minSup=10.0 will be treated as float + minWeight: float or int or str + The user can specify minWeight either in count or proportion of database size. + If the program detects the data type of minWeight is integer, then it treats minWeight as expressed in count. + Otherwise, it will be treated as float. + Example: minWeight=10 will be treated as integer, while minWeight=10.0 will be treated as float + sep : str + This variable is used to distinguish items from one another in a transaction. The default separator is tab space or \t. + However, the users can override the default separator. 
+ oFile : file + Name of the output file or the path of the output file + startTime:float + To record the start time of the mining process + endTime:float + To record the completion time of the mining process + memoryUSS : float + To store the total amount of USS memory consumed by the program + memoryRSS : float + To store the total amount of RSS memory consumed by the program + Database : list + To store the transactions of a database in list + mapSupport : Dictionary + To maintain the information of item and their frequency + lno : int + it represents the total number of transactions + tree : class + it represents the Tree class + finalPatterns : dict + it represents to store the patterns + + :Methods : + + startMine() + Mining process will start from here + getPatterns() + Complete set of patterns will be retrieved with this function + save(oFile) + Complete set of frequent patterns will be loaded into an output file + getPatternsAsDataFrame() + Complete set of frequent patterns will be loaded into a dataframe + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function + creatingItemSets() + Scans the dataset or dataframes and stores in list format + frequentOneItem() + Extracts the one-frequent patterns from transactions + + **Methods to execute code on terminal** + ------------------------------------------- + .. code-block:: console + + + Format: + + (.venv) $ python3 WFIM.py <inputFile> <weightFile> <outputFile> <minSup> <minWeight> + + Example Usage: + + (.venv) $ python3 WFIM.py sampleDB.txt weightSample.txt patterns.txt 10.0 3.4 + + + .. note:: minSup and minWeight will be considered in support count or frequency + + + **Importing this algorithm into a python program** + ----------------------------------------------------- + .. code-block:: python + + from PAMI.weightedFrequentPattern.basic import WFIM as alg + + obj = alg.WFIM(iFile, wFile, minSup, minWeight) + + obj.startMine() + + frequentPatterns = obj.getPatterns() + + print("Total number of Frequent Patterns:", len(frequentPatterns)) + + obj.save(oFile) + + Df = obj.getPatternsAsDataFrame() + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + **Credits:** + ---------------------- + The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran. 
+ + """ + + __startTime = float() + __endTime = float() + _minSup = str() + __finalPatterns = {} + _iFile = " " + _oFile = " " + _sep = " " + __memoryUSS = float() + __memoryRSS = float() + __Database = [] + __mapSupport = {} + __lno = 0 + __tree = _Tree() + __rank = {} + __rankDup = {} + + def __init__(self, iFile: str, wFile: str, minSup: str, minWeight: int, sep: str='\t') -> None: + super().__init__(iFile, wFile, minSup, minWeight, sep) + + def __creatingItemSets(self) -> None: + """ + Storing the complete transactions of the database/input file in a database variable + :return: None + """ + self.__Database = [] + if isinstance(self._iFile, _fp._pd.DataFrame): + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'Transactions' in i: + self.__Database = self._iFile['Transactions'].tolist() + + # print(self.Database) + if isinstance(self._iFile, str): + if _fp._validators.url(self._iFile): + data = _fp._urlopen(self._iFile) + for line in data: + line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self.__Database.append(temp) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line.strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + # print(len(temp)) + self.__Database.append(temp) + except IOError: + print("File Not Found") + quit() + + def _scanningWeights(self) -> None: + """ + Storing the weights of the variables in input file in a weights variable + :return: None + """ + global _weights + _weights = {} + if isinstance(self._wFile, _fp._pd.DataFrame): + items, weights = [], [] + if self._wFile.empty: + print("its empty..") + i = self._wFile.columns.values.tolist() + if 'items' in i: + items = self._wFile['items'].tolist() + if 'weights' in i: + weights = self._wFile['weights'].tolist() + for i in range(len(weights)): + _weights[items[i]] = weights[i] + + # print(self.Database) + if isinstance(self._wFile, str): + if _fp._validators.url(self._wFile): + data = _fp._urlopen(self._wFile) + for line in data: + line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + _weights[temp[0]] = temp[1] + else: + try: + with open(self._wFile, 'r', encoding='utf-8') as f: + for line in f: + line.strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + s = int(float(temp[1])) + _weights[temp[0]] = s + except IOError: + print("File Not Found") + quit() + + def __convert(self, value: Union[int, float, str]) -> Union[int, float]: + """ + To convert the type of user specified minSup value. + + :param value: user specified minSup value + :return: converted type + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self.__Database) * value) + if type(value) is str: + if '.' 
in value: + value = float(value) + value = (len(self.__Database) * value) + else: + value = int(value) + return value + + def __frequentOneItem(self) -> List[str]: + """ + Generating One frequent items sets + :return: list + """ + global _maxWeight + self.__mapSupport = {} + for tr in self.__Database: + for i in range(0, len(tr)): + if tr[i] not in self.__mapSupport: + self.__mapSupport[tr[i]] = 1 + else: + self.__mapSupport[tr[i]] += 1 + self.__mapSupport = {k: v for k, v in self.__mapSupport.items() if v >= self._minSup and v * _maxWeight > self._minSup} + genList = [k for k, v in sorted(self.__mapSupport.items(), key=lambda x: x[1], reverse=True)] + self.__rank = dict([(index, item) for (item, index) in enumerate(genList)]) + return genList + + def __updateTransactions(self, itemSet: List[str]) -> List[List[int]]: + """ + Updates the items in transactions with rank of items according to their support + :Example: oneLength = {'a':7, 'b': 5, 'c':'4', 'd':3} + rank = {'a':0, 'b':1, 'c':2, 'd':3} + + :param itemSet: list of one-frequent items + :return: list + """ + list1 = [] + for tr in self.__Database: + list2 = [] + for i in range(len(tr)): + if tr[i] in itemSet: + list2.append(self.__rank[tr[i]]) + if len(list2) >= 1: + list2.sort() + list1.append(list2) + return list1 + + @staticmethod + def __buildTree(transactions: List[List[int]], info: Dict[int, int]) -> '_Tree': + """ + Builds the tree with updated transactions + + :param transactions: updated transactions + :param info: support details of each item in transactions. + :return: Transactions compressed in fp-tree + """ + rootNode = _Tree() + rootNode.info = info.copy() + for i in range(len(transactions)): + rootNode.addTransaction(transactions[i], 1) + return rootNode + + def __savePeriodic(self, itemSet: List[int]) -> str: + """ + The duplication items and their ranks + + :param itemSet: frequent itemSet that generated + :return: patterns with original item names. + """ + temp = str() + for i in itemSet: + temp = temp + self.__rankDup[i] + "\t" + return temp + +
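+    # Illustrative note on __convert (hypothetical values): with a database of
+    # 100 transactions, minSup=10 or minSup="10" stays a count of 10, while
+    # minSup=0.5 or minSup="0.5" is converted to 100 * 0.5 = 50.0 transactions.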
+[docs] + @deprecated("It is recommended to use mine() instead of startMine() for mining process") + def startMine(self) -> None: + """ + main program to start the operation + :return: None + """ + self.mine()
+ + +
+[docs] + def mine(self) -> None: + """ + main program to start the operation + :return: None + """ + global _minSup, _minWeight, _miniWeight, _maxWeight, _weights + self.__startTime = _fp._time.time() + if self._iFile is None: + raise Exception("Please enter the file path or file name:") + if self._minSup is None: + raise Exception("Please enter the Minimum Support") + self.__creatingItemSets() + self._scanningWeights() + _weights = {k: v for k, v in _weights.items() if v >= _minWeight} + _maxWeight = max([s for s in _weights.values()]) + _miniWeight = min([s for s in _weights.values()]) + self._minSup = self.__convert(self._minSup) + _minSup = self._minSup + itemSet = self.__frequentOneItem() + updatedTransactions = self.__updateTransactions(itemSet) + for x, y in self.__rank.items(): + self.__rankDup[y] = x + info = {self.__rank[k]: v for k, v in self.__mapSupport.items()} + __Tree = self.__buildTree(updatedTransactions, info) + patterns = __Tree.generatePatterns([]) + self.__finalPatterns = {} + for k in patterns: + s = self.__savePeriodic(k[0]) + self.__finalPatterns[str(s)] = k[1] + print("Weighted Frequent patterns were generated successfully using basic algorithm") + self.__endTime = _fp._time.time() + self.__memoryUSS = float() + self.__memoryRSS = float() + process = _fp._psutil.Process(_fp._os.getpid()) + self.__memoryUSS = process.memory_full_info().uss + self.__memoryRSS = process.memory_info().rss
+ + + +
+[docs] + def getMemoryUSS(self) -> float: + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self.__memoryUSS
+ + +
+[docs] + def getMemoryRSS(self) -> float: + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function. + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self.__memoryRSS
+ + +
+[docs] + def getRuntime(self) -> float: + """ + Calculating the total amount of runtime taken by the mining process. + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self.__endTime - self.__startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self) -> pd.DataFrame: + """ + Storing final frequent patterns in a dataframe. + + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataframe = {} + data = [] + for a, b in self.__finalPatterns.items(): + data.append([a.replace('\t', ' '), b]) + dataframe = _fp._pd.DataFrame(data, columns=['Patterns', 'Support']) + return dataframe
+ + +
+[docs] + def save(self, outFile: str) -> None: + """ + Complete set of frequent patterns will be loaded in to an output file. + + :param outFile: name of the output file + :type outFile: csv file + :return: None + """ + self._oFile = outFile + writer = open(self._oFile, 'w+') + for x, y in self.__finalPatterns.items(): + s1 = x.strip() + ":" + str(y) + writer.write("%s \n" % s1)
+ + +
+[docs] + def getPatterns(self) -> Dict[str, int]: + """ + Function to send the set of frequent patterns after completion of the mining process. + + :return: returning frequent patterns + :rtype: dict + """ + return self.__finalPatterns
+ + +
+[docs] + def printResults(self) -> None: + """ + This function is used to print the results + :return: None + """ + print("Total number of Weighted Frequent Patterns:", len(self.getPatterns())) + print("Total Memory in USS:", self.getMemoryUSS()) + print("Total Memory in RSS:", self.getMemoryRSS()) + print("Total ExecutionTime in seconds:", self.getRuntime())
+
+ + + + + +if __name__ == "__main__": + _ap = str() + if len(_fp._sys.argv) == 6 or len(_fp._sys.argv) == 7: + if len(_fp._sys.argv) == 7: + _ap = WFIM(_fp._sys.argv[1], _fp._sys.argv[3], _fp._sys.argv[4], _fp._sys.argv[5], _fp._sys.argv[6]) + if len(_fp._sys.argv) == 6: + _ap = WFIM(_fp._sys.argv[1], _fp._sys.argv[3], _fp._sys.argv[4], _fp._sys.argv[5]) + _ap.mine() + print("Total number of Weighted Frequent Patterns:", len(_ap.getPatterns())) + _ap.save(_fp._sys.argv[2]) + print("Total Memory in USS:", _ap.getMemoryUSS()) + print("Total Memory in RSS:", _ap.getMemoryRSS()) + print("Total ExecutionTime in seconds:", _ap.getRuntime()) + else: + print("Error! The number of input parameters does not match the total number of parameters provided") +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/weightedFrequentRegularPattern/basic/WFRIMiner.html b/sphinx/_build/html/_modules/PAMI/weightedFrequentRegularPattern/basic/WFRIMiner.html new file mode 100644 index 000000000..becd65866 --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/weightedFrequentRegularPattern/basic/WFRIMiner.html @@ -0,0 +1,922 @@ + + + + + + PAMI.weightedFrequentRegularPattern.basic.WFRIMiner — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.weightedFrequentRegularPattern.basic.WFRIMiner

+# WFRIMiner is one of the fundamental algorithms to discover weighted frequent regular patterns in a transactional database.
+# It stores the database in a compressed WFRI-tree, decreasing the memory usage, and extracts the patterns from the tree. It employs the downward closure property to reduce the search space effectively.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#
+#             from PAMI.weightedFrequentRegularPattern.basic import WFRIMiner as alg
+#
+#             obj = alg.WFRIMiner(iFile, WS, regularity)
+#
+#             obj.startMine()
+#
+#             weightedFrequentRegularPatterns = obj.getPatterns()
+#
+#             print("Total number of Frequent Patterns:", len(weightedFrequentRegularPatterns))
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+__copyright__ = """
+ Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+from PAMI.weightedFrequentRegularPattern.basic import abstract as _fp
+import pandas as pd
+from deprecated import deprecated
+from typing import List, Dict
+
+
+_WS = str()
+_regularity = str()
+_lno = int()
+_weights = {}
+_wf = {}
+_fp._sys.setrecursionlimit(20000)
+
+
+class _Node:
+    """
+    A class used to represent the node of frequentPatternTree
+
+    :Attributes:
+        item: int
+            storing item of a node
+        timeStamps: list
+            To maintain the timestamps of the transactions reaching the node
+        parent: node
+            To maintain the parent of node
+        children: dict
+            To maintain the children of node
+
+    :Methods:
+        addChild(node)
+            Updates the nodes children list and parent for the given node
+
+    """
+
+    def __init__(self, item: int, children: dict) -> None:
+        """
+        Initializing the Node class
+
+        :param item: Storing the item of a node
+        :type item: int or None
+        :param children: To maintain the children of a node
+        :type children: dict
+        :return: None
+        """
+
+        self.item = item
+        self.children = children
+        self.parent = None
+        self.timeStamps = []
+
+    def addChild(self, node) -> None:
+        """
+        To add the children to a node
+
+        :param node: child node to be attached to this node
+        :return: None
+        """
+
+        self.children[node.item] = node
+        node.parent = self
+
+
+class _Tree:
+    """
+    A class used to represent the frequentPatternGrowth tree structure
+
+    :Attributes:
+        root : Node
+            The first node of the tree set to Null.
+        summaries : dictionary
+            Stores the nodes that share the same itemId
+        info : dictionary
+            frequency of items in the transactions
+
+    :Methods:
+        addTransaction(transaction, tid)
+            adds the items of a transaction into the tree as nodes; tid holds the transaction's timestamps
+        getConditionalPatterns(alpha, pattern)
+            gets the conditional patterns from the tree for a node
+        conditionalDatabases(conditionalPatterns, conditionalTimeStamps, pattern)
+            filters the conditional patterns by removing items that fail the weighted-support or regularity thresholds
+        generatePatterns(prefix)
+            generating the patterns from fp-tree
+    """
+
+    def __init__(self) -> None:
+        self.root = _Node(None, {})
+        self.summaries = {}
+        self.info = {}
+
+    def addTransaction(self, transaction: list, tid: list) -> None:
+        """
+        Adding a transaction into tree
+
+        :param transaction: To represent the complete database
+        :type transaction: list
+        :param tid: To represent the timestamp of a database
+        :type tid: list
+        :return: None
+        """
+        currentNode = self.root
+        for i in range(len(transaction)):
+            if transaction[i] not in currentNode.children:
+                newNode = _Node(transaction[i], {})
+                currentNode.addChild(newNode)
+                if transaction[i] in self.summaries:
+                    self.summaries[transaction[i]].append(newNode)
+                else:
+                    self.summaries[transaction[i]] = [newNode]
+                currentNode = newNode
+            else:
+                currentNode = currentNode.children[transaction[i]]
+        currentNode.timeStamps = currentNode.timeStamps + tid
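+        # Illustrative note (hypothetical timestamps): only the last node of the
+        # inserted path records the transaction's timestamp, so a node reached
+        # by transactions at times [2, 5, 9] ends up with timeStamps == [2, 5, 9]
+        # for the later regularity checks.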
+
+    def getConditionalPatterns(self, alpha, pattern) -> tuple:
+        """
+        Generates all the conditional patterns of a respective node
+
+        :param alpha: To represent a Node in the tree
+        :type alpha: Node
+        :param pattern: prefix of the pattern
+        :type pattern: list
+        :return: A tuple consisting of finalPatterns, conditional pattern base and information
+        """
+        finalPatterns = []
+        finalSets = []
+        for i in self.summaries[alpha]:
+            set1 = i.timeStamps
+            set2 = []
+            while i.parent.item is not None:
+                set2.append(i.parent.item)
+                i = i.parent
+            if len(set2) > 0:
+                set2.reverse()
+                finalPatterns.append(set2)
+                finalSets.append(set1)
+        finalPatterns, finalSets, info = self.conditionalDatabases(finalPatterns, finalSets, pattern)
+        return finalPatterns, finalSets, info
+
+    @staticmethod
+    def generateTimeStamps(node) -> list:
+        """
+        To get the timestamps of a node
+
+        :param node: A node in the tree
+        :return: Timestamps of a node
+        """
+
+        finalTimeStamps = node.timeStamps
+        return finalTimeStamps
+
+    def removeNode(self, nodeValue) -> None:
+        """
+        Removing the node from tree
+
+        :param nodeValue: To represent a node in the tree
+        :type nodeValue: node
+        :return: None (the removed node's timestamps are merged into its parents)
+        """
+
+        for i in self.summaries[nodeValue]:
+            i.parent.timeStamps = i.parent.timeStamps + i.timeStamps
+            del i.parent.children[nodeValue]
+
+    def getTimeStamps(self, alpha) -> list:
+        """
+        To get all the timestamps of the nodes which share same item name
+
+        :param alpha: Node in a tree
+        :return: Timestamps of a  node
+        """
+        temporary = []
+        for i in self.summaries[alpha]:
+            temporary += i.timeStamps
+        return temporary
+
+    @staticmethod
+    def getSupportAndPeriod(timeStamps: list, pattern: list) -> list:
+        """
+        To calculate the periodicity and support
+
+        :param timeStamps: Timestamps of an item set
+        :type timeStamps: list
+        :param pattern: pattern to evaluate the weighted frequent regular or not
+        :type pattern: list
+        :return: a list [support, periodicity, weighted frequency]
+        """
+        global _WS, _regularity, _lno, _weights
+        timeStamps.sort()
+        cur = 0
+        per = list()
+        sup = 0
+        for j in range(len(timeStamps)):
+            per.append(timeStamps[j] - cur)
+            cur = timeStamps[j]
+            sup += 1
+        per.append(_lno - cur)
+        l = int()
+        for i in pattern:
+            l = l + _weights[i]
+        wf = (l / (len(pattern))) * sup
+        if len(per) == 0:
+            return [0, 0, 0]
+        return [sup, max(per), wf]
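+        # Illustrative note (hypothetical values): with _lno = 10, sorted
+        # timeStamps = [2, 5, 9] and pattern = ['a', 'b'] whose weights are
+        # 0.4 and 0.6, the gaps are [2, 3, 4, 1], so support = 3,
+        # periodicity = max(per) = 4 and wf = ((0.4 + 0.6) / 2) * 3 = 1.5.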
+
+    def conditionalDatabases(self, conditionalPatterns: list, conditionalTimeStamps: list, pattern: list) -> tuple:
+        """
+        It generates the conditional patterns with periodic-frequent items
+
+        :param conditionalPatterns: conditionalPatterns generated from conditionPattern method of a respective node
+        :type conditionalPatterns: list
+        :param conditionalTimeStamps: Represents the timestamps of a conditional patterns of a node
+        :type conditionalTimeStamps: list
+        :param pattern: prefix of the pattern
+        :type pattern: list
+        :returns: Returns conditional transactions by removing non-periodic and non-frequent items
+        """
+        global _WS, _regularity
+        pat = []
+        timeStamps = []
+        data1 = {}
+        for i in range(len(conditionalPatterns)):
+            for j in conditionalPatterns[i]:
+                if j in data1:
+                    data1[j] = data1[j] + conditionalTimeStamps[i]
+                else:
+                    data1[j] = conditionalTimeStamps[i]
+        updatedDictionary = {}
+        for m in data1:
+            updatedDictionary[m] = self.getSupportAndPeriod(data1[m], pattern + [m])
+        updatedDictionary = {k: v for k, v in updatedDictionary.items() if v[0] >= _WS and v[1] <= _regularity}
+        count = 0
+        for p in conditionalPatterns:
+            p1 = [v for v in p if v in updatedDictionary]
+            trans = sorted(p1, key=lambda x: (updatedDictionary.get(x)[0], -x), reverse=True)
+            if len(trans) > 0:
+                pat.append(trans)
+                timeStamps.append(conditionalTimeStamps[count])
+            count += 1
+        return pat, timeStamps, updatedDictionary
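+
+        # Illustrative note (hypothetical thresholds): an item survives a
+        # conditional database only if its recomputed support is at least _WS
+        # and its largest gap is at most _regularity; e.g. [support, periodicity]
+        # = [5, 3] passes for _WS = 4 and _regularity = 4, while [5, 6] is
+        # pruned for being irregular.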
+
+    def generatePatterns(self, prefix: list) -> None:
+        """
+        Generates the patterns
+
+        :param prefix: Forms the combination of items
+        :type prefix: list
+        :returns: yields patterns with their support and periodicity
+        """
+        global _WS
+        for i in sorted(self.summaries, key=lambda x: (self.info.get(x)[0], -x)):
+            pattern = prefix[:]
+            pattern.append(i)
+            if self.info[i][2] >= _WS:
+                yield pattern, self.info[i]
+                patterns, timeStamps, info = self.getConditionalPatterns(i, pattern)
+                conditionalTree = _Tree()
+                conditionalTree.info = info.copy()
+                for pat in range(len(patterns)):
+                    conditionalTree.addTransaction(patterns[pat], timeStamps[pat])
+                if len(patterns) > 0:
+                    for q in conditionalTree.generatePatterns(pattern):
+                        yield q
+            self.removeNode(i)
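+
+    # Illustrative note: a candidate prefix is yielded only when its weighted
+    # frequency info[i][2] reaches _WS; removeNode(i) then merges i's timestamps
+    # into its parents so that the remaining items can still be mined.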
+
+
+
+[docs] +class WFRIMiner(_fp._weightedFrequentRegularPatterns): + """ + :Description: WFRIMiner is one of the fundamental algorithms to discover weighted frequent regular patterns in a transactional database. + * It stores the database in a compressed WFRI-tree, decreasing the memory usage, and extracts the patterns from the tree. It employs the downward closure property to reduce the search space effectively. + + :Reference: + K. Klangwisan and K. Amphawan, "Mining weighted-frequent-regular itemsets from transactional database," + 2017 9th International Conference on Knowledge and Smart Technology (KST), 2017, pp. 66-71, + doi: 10.1109/KST.2017.7886090. + + :param iFile: str : + Name of the Input file to mine complete set of Weighted Frequent Regular Patterns. + :param oFile: str : + Name of the output file to store complete set of Weighted Frequent Regular Patterns. + :param sep: str : + This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator. + :param wFile: str : + This is a weighted file. + + :Attributes: + + iFile : file + Input file name or path of the input file + WS: float or int or str + The user can specify WS either in count or proportion of database size. + If the program detects the data type of WS is integer, then it treats WS as expressed in count. + Otherwise, it will be treated as float. + Example: WS=10 will be treated as integer, while WS=10.0 will be treated as float + regularity: float or int or str + The user can specify regularity either in count or proportion of database size. + If the program detects the data type of regularity is integer, then it treats regularity as expressed in count. + Otherwise, it will be treated as float. + Example: regularity=10 will be treated as integer, while regularity=10.0 will be treated as float + sep : str + This variable is used to distinguish items from one another in a transaction. The default separator is tab space or \t. + However, the users can override the default separator. 
+ oFile : file + Name of the output file or the path of the output file + startTime:float + To record the start time of the mining process + endTime:float + To record the completion time of the mining process + memoryUSS : float + To store the total amount of USS memory consumed by the program + memoryRSS : float + To store the total amount of RSS memory consumed by the program + Database : list + To store the transactions of a database in list + mapSupport : Dictionary + To maintain the information of item and their frequency + lno : int + it represents the total number of transactions + tree : class + it represents the Tree class + finalPatterns : dict + it represents to store the patterns + + :Methods: + + startMine() + Mining process will start from here + getPatterns() + Complete set of patterns will be retrieved with this function + save(oFile) + Complete set of frequent patterns will be loaded into an output file + getPatternsAsDataFrame() + Complete set of frequent patterns will be loaded into a dataframe + getMemoryUSS() + Total amount of USS memory consumed by the mining process will be retrieved from this function + getMemoryRSS() + Total amount of RSS memory consumed by the mining process will be retrieved from this function + getRuntime() + Total amount of runtime taken by the mining process will be retrieved from this function + creatingItemSets() + Scans the dataset or dataframes and stores in list format + frequentOneItem() + Extracts the one-frequent patterns from transactions + + **Methods to execute code on terminal** + ------------------------------------------- + .. code-block:: console + + + Format: + + (.venv) $ python3 WFRIMiner.py <inputFile> <outputFile> <weightSupport> <regularity> + + Example Usage: + + (.venv) $ python3 WFRIMiner.py sampleDB.txt patterns.txt 10 5 + + + .. note:: WS & regularity will be considered in support count or frequency + + + **Importing this algorithm into a python program** + ---------------------------------------------------- + .. code-block:: python + + from PAMI.weightedFrequentRegularPattern.basic import WFRIMiner as alg + + obj = alg.WFRIMiner(iFile, WS, regularity) + + obj.startMine() + + weightedFrequentRegularPatterns = obj.getPatterns() + + print("Total number of Frequent Patterns:", len(weightedFrequentRegularPatterns)) + + obj.save(oFile) + + Df = obj.getPatternsAsDataFrame() + + memUSS = obj.getMemoryUSS() + + print("Total Memory in USS:", memUSS) + + memRSS = obj.getMemoryRSS() + + print("Total Memory in RSS", memRSS) + + run = obj.getRuntime() + + print("Total ExecutionTime in seconds:", run) + + **Credits:** + ---------------- + The complete program was written by P.Likhitha under the supervision of Professor Rage Uday Kiran. 
+ + """ + + _startTime = float() + _endTime = float() + _WS = str() + _regularity = str() + _weight = {} + _finalPatterns = {} + _wFile = " " + _iFile = " " + _oFile = " " + _sep = " " + _memoryUSS = float() + _memoryRSS = float() + _Database = [] + _mapSupport = {} + _lno = 0 + _tree = _Tree() + _rank = {} + _rankDup = {} + + def __init__(self, iFile, _wFile, WS, regularity, sep='\t') -> None: + super().__init__(iFile, _wFile, WS, regularity, sep) + + def _creatingItemSets(self) -> None: + """ + Storing the complete transactions of the database/input file in a database variable + :return: None + """ + self._Database = [] + self._weight = {} + if isinstance(self._iFile, _fp._pd.DataFrame): + if self._iFile.empty: + print("its empty..") + i = self._iFile.columns.values.tolist() + if 'Transactions' in i: + self._Database = self._iFile['Transactions'].tolist() + + if isinstance(self._wFile, _fp._pd.DataFrame): + _items, _weights = [], [] + if self._wFile.empty: + print("its empty..") + i = self._wFile.columns.values.tolist() + if 'items' in i: + _items = self._wFile['items'].tolist() + if 'weight' in i: + _weights = self._wFile['weight'].tolist() + for i in range(len(_items)): + self._weight[_items[i]] = _weights[i] + + # print(self.Database) + if isinstance(self._iFile, str): + if _fp._validators.url(self._iFile): + data = _fp._urlopen(self._iFile) + for line in data: + line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + else: + try: + with open(self._iFile, 'r', encoding='utf-8') as f: + for line in f: + line.strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._Database.append(temp) + except IOError: + print("File Not Found") + quit() + + if isinstance(self._wFile, str): + if _fp._validators.url(self._wFile): + data = _fp._urlopen(self._wFile) + for line in data: + line.strip() + line = line.decode("utf-8") + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._weight[temp[0]] = float(temp[1]) + else: + try: + with open(self._wFile, 'r', encoding='utf-8') as f: + for line in f: + line.strip() + temp = [i.rstrip() for i in line.split(self._sep)] + temp = [x for x in temp if x] + self._weight[temp[0]] = float(temp[1]) + except IOError: + print("File Not Found") + quit() + + def _convert(self, value) -> float: + """ + To convert the type of user specified minSup value + + :param value: user specified minSup value + :return: converted type + """ + if type(value) is int: + value = int(value) + if type(value) is float: + value = (len(self._Database) * value) + if type(value) is str: + if '.' 
in value: + value = float(value) + value = (len(self._Database) * value) + else: + value = int(value) + return value + + def _frequentOneItem(self) -> List[str]: + """ + Generating One frequent items sets + :return: list + """ + global _lno, _wf, _weights + self._mapSupport = {} + _owf = {} + for tr in self._Database: + for i in range(1, len(tr)): + if tr[i] not in self._mapSupport: + self._mapSupport[tr[i]] = [int(tr[0]), int(tr[0]), 1] + else: + self._mapSupport[tr[i]][0] = max(self._mapSupport[tr[i]][0], (int(tr[0]) - self._mapSupport[tr[i]][1])) + self._mapSupport[tr[i]][1] = int(tr[0]) + self._mapSupport[tr[i]][2] += 1 + for key in self._mapSupport: + self._mapSupport[key][0] = max(self._mapSupport[key][0], abs(len(self._Database) - self._mapSupport[key][1])) + _lno = len(self._Database) + self._mapSupport = {k: [v[2], v[0]] for k, v in self._mapSupport.items() if v[0] <= self._regularity} + for x, y in self._mapSupport.items(): + if self._weight.get(x) is None: + self._weight[x] = 0 + gmax = max([self._weight[values] for values in self._mapSupport.keys()]) + for x, y in self._mapSupport.items(): + _owf[x] = y[0] * gmax + self._mapSupport = {k: v for k, v in self._mapSupport.items() if v[0] * _owf[k] >= self._WS} + for x, y in self._mapSupport.items(): + temp = self._weight[x] * y[0] + _wf[x] = temp + self._mapSupport[x].append(temp) + genList = [k for k, v in sorted(self._mapSupport.items(), key=lambda x: x[1], reverse= True)] + self._rank = dict([(index, item) for (item, index) in enumerate(genList)]) + for x, y in self._rank.items(): + _weights[y] = self._weight[x] + return genList + + def _updateTransactions(self, itemSet) -> List[List[int]]: + """ + Updates the items in transactions with rank of items according to their support + + :Example: + oneLength = {'a':7, 'b': 5, 'c':'4', 'd':3} + rank = {'a':0, 'b':1, 'c':2, 'd':3} + + :param itemSet: list of one-frequent items + :return: None + """ + list1 = [] + for tr in self._Database: + list2 = [int(tr[0])] + for i in range(1, len(tr)): + if tr[i] in itemSet: + list2.append(self._rank[tr[i]]) + if len(list2) >= 2: + basket = list2[1:] + basket.sort() + list2[1:] = basket[0:] + list1.append(list2) + return list1 + + @staticmethod + def _buildTree(transactions, info) -> _Tree: + """ + Builds the tree with updated transactions + + :param transactions: updated transactions + :param info: support details of each item in transactions + :return: transactions compressed in fp-tree + + """ + rootNode = _Tree() + rootNode.info = info.copy() + for i in range(len(transactions)): + set1 = [transactions[i][0]] + rootNode.addTransaction(transactions[i][1:], set1) + return rootNode + + def _savePeriodic(self, itemSet) -> str: + """ + The duplication items and their ranks + + :param itemSet: frequent itemSet that generated + :return: patterns with original item names. + + """ + temp = str() + for i in itemSet: + temp = temp + self._rankDup[i] + "\t" + return temp + +
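+    # Illustrative note (hypothetical line): each database record is
+    # timestamp-first, so a line with sep-separated tokens "3", "a", "b", "c"
+    # is read as ['3', 'a', 'b', 'c']; _updateTransactions keeps the timestamp
+    # at index 0 and replaces the surviving items by their support ranks.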
+[docs] + @deprecated("It is recommended to use mine() instead of startMine() for mining process") + def startMine(self) -> None: + """ + main program to start the operation + :return: None + """ + self.mine()
+ + +
+[docs] + def mine(self) -> None: + """ + main program to start the operation + :return: None + """ + global _WS, _regularity, _weights + self._startTime = _fp._time.time() + if self._iFile is None: + raise Exception("Please enter the file path or file name:") + if self._WS is None: + raise Exception("Please enter the Minimum Support") + self._creatingItemSets() + self._WS = self._convert(self._WS) + self._regularity = self._convert(self._regularity) + _WS, _regularity, _weights = self._WS, self._regularity, self._weight + itemSet = self._frequentOneItem() + updatedTransactions = self._updateTransactions(itemSet) + for x, y in self._rank.items(): + self._rankDup[y] = x + info = {self._rank[k]: v for k, v in self._mapSupport.items()} + _Tree = self._buildTree(updatedTransactions, info) + patterns = _Tree.generatePatterns([]) + self._finalPatterns = {} + for k in patterns: + s = self._savePeriodic(k[0]) + self._finalPatterns[str(s)] = k[1] + print("Weighted Frequent Regular patterns were generated successfully using WFRIM algorithm") + self._endTime = _fp._time.time() + self._memoryUSS = float() + self._memoryRSS = float() + process = _fp._psutil.Process(_fp._os.getpid()) + self._memoryRSS = float() + self._memoryUSS = float() + self._memoryUSS = process.memory_full_info().uss + self._memoryRSS = process.memory_info().rss
+ + +
+[docs] + def getMemoryUSS(self) -> float: + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+[docs] + def getMemoryRSS(self) -> float: + """ + Total amount of RSS memory consumed by the mining process will be retrieved from this function + + :return: returning RSS memory consumed by the mining process + :rtype: float + """ + + return self._memoryRSS
+ + +
+[docs] + def getRuntime(self) -> float: + """ + Calculating the total amount of runtime taken by the mining process + + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self) -> _fp._pd.DataFrame: + """ + Storing final frequent patterns in a dataframe + + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + + dataframe = {} + data = [] + for a, b in self._finalPatterns.items(): + data.append([a.replace('\t', ' '), b]) + dataframe = _fp._pd.DataFrame(data, columns=['Patterns', 'Support']) + return dataframe
+ + +
+[docs] + def save(self, outFile: str) -> None: + """ + Complete set of frequent patterns will be loaded in to an output file + + :param outFile: name of the output file + :type outFile: csv file + :return: None + """ + self._oFile = outFile + writer = open(self._oFile, 'w+') + for x, y in self._finalPatterns.items(): + s1 = x.strip() + ":" + str(y) + writer.write("%s \n" % s1)
+ + +
+[docs] + def getPatterns(self) -> Dict[str, float]: + """ + Function to send the set of frequent patterns after completion of the mining process + + :return: returning frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+[docs] + def printResults(self) -> None: + """ + This function is used to print the results + :return: None + """ + print("Total number of Weighted Frequent Regular Patterns:", len(self.getPatterns())) + print("Total Memory in USS:", self.getMemoryUSS()) + print("Total Memory in RSS:", self.getMemoryRSS()) + print("Total ExecutionTime in seconds:", self.getRuntime())
+
+ + + +if __name__ == "__main__": + _ap = str() + if len(_fp._sys.argv) == 6 or len(_fp._sys.argv) == 7: + if len(_fp._sys.argv) == 7: + _ap = WFRIMiner(_fp._sys.argv[1], _fp._sys.argv[3], _fp._sys.argv[4], _fp._sys.argv[5], _fp._sys.argv[6]) + if len(_fp._sys.argv) == 6: + _ap = WFRIMiner(_fp._sys.argv[1], _fp._sys.argv[3], _fp._sys.argv[4], _fp._sys.argv[5]) + _ap.mine() + print("Total number of Weighted Frequent Regular Patterns:", len(_ap.getPatterns())) + _ap.save(_fp._sys.argv[2]) + print("Total Memory in USS:", _ap.getMemoryUSS()) + print("Total Memory in RSS:", _ap.getMemoryRSS()) + print("Total ExecutionTime in seconds:", _ap.getRuntime()) + else: + print("Error! The number of input parameters does not match the total number of parameters provided") +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/PAMI/weightedUncertainFrequentPattern/basic/WUFIM.html b/sphinx/_build/html/_modules/PAMI/weightedUncertainFrequentPattern/basic/WUFIM.html new file mode 100644 index 000000000..b1dd1c4da --- /dev/null +++ b/sphinx/_build/html/_modules/PAMI/weightedUncertainFrequentPattern/basic/WUFIM.html @@ -0,0 +1,961 @@ + + + + + + PAMI.weightedUncertainFrequentPattern.basic.WUFIM — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for PAMI.weightedUncertainFrequentPattern.basic.WUFIM

+# WUFIM is one of the algorithms to discover weighted frequent patterns in an uncertain transactional database using a PUF-Tree.
+#
+# **Importing this algorithm into a python program**
+# --------------------------------------------------------
+#
+#
+#             from PAMI.weightedUncertainFrequentPattern.basic import WUFIM as alg
+#
+#             obj = alg.WUFIM(iFile, wFile, minSup, sep)
+#
+#             obj.startMine()
+#
+#             Patterns = obj.getPatterns()
+#
+#             print("Total number of  Patterns:", len(Patterns))
+#
+#             obj.save(oFile)
+#
+#             Df = obj.getPatternsAsDataFrame()
+#
+#             memUSS = obj.getMemoryUSS()
+#
+#             print("Total Memory in USS:", memUSS)
+#
+#             memRSS = obj.getMemoryRSS()
+#
+#             print("Total Memory in RSS", memRSS)
+#
+#             run = obj.getRuntime()
+#
+#             print("Total ExecutionTime in seconds:", run)
+#
+
+
+
+__copyright__ = """
+ Copyright (C)  2021 Rage Uday Kiran
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see `<https://www.gnu.org/licenses/>`_.
+     
+"""
+
+from PAMI.weightedUncertainFrequentPattern.basic import abstract as _ab
+import pandas as pd
+from deprecated import deprecated
+
+_expSup = str()
+_expWSup = str()
+_weights = {}
+_finalPatterns = {}
+_ab._sys.setrecursionlimit(20000)
+class _Item:
+    """
+    A class used to represent an item and its existential probability in a transaction of an uncertain dataset
+
+    :Attributes:
+
+        item : int or word
+            Represents the name of the item
+
+        probability : float
+            Represents the existential probability (likelihood of presence) of an item
+    """
+
+    def __init__(self, item: int, probability: float) -> None:
+        self.item = item
+        self.probability = probability
+
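+# Illustrative note (not part of the original source): _creatingItemSets below
+# parses each input line of the form "a<sep>b<sep>c:0.6<sep>0.9<sep>0.4", where
+# the part before ':' lists the items and the part after lists their
+# existential probabilities. Assuming hypothetical items and probabilities, a
+# parsed transaction would be the list:
+#
+#     [_Item('a', 0.6), _Item('b', 0.9), _Item('c', 0.4)]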
+
+class _Node(object):
+    """
+    A class used to represent a node of the frequent-pattern tree
+
+    :Attributes:
+
+        item : int
+            storing the item of a node
+        probability : float
+            To maintain the expected support of the node
+        parent : node
+            To maintain the parent of every node
+        children : dict
+            To maintain the children of a node
+
+    :Methods:
+
+        addChild(itemName)
+            storing the children to their respective parent nodes
+    """
+
+    def __init__(self, item, children: list) -> None:
+        self.item = item
+        self.probability = 1
+        self.children = children
+        self.parent = None
+
+    def addChild(self, node) -> None:
+        self.children[node.item] = node
+        node.parent = self
+
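+# Illustrative sketch (assumed items, not part of the original source): addChild
+# wires both directions of the parent/child link, so a one-item branch can be
+# attached to a root as follows:
+#
+#     root = _Node(None, {})
+#     child = _Node('a', {})
+#     root.addChild(child)   # root.children['a'] is child; child.parent is root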
+
+class _Tree(object):
+    """
+    A class used to represent the frequentPatternGrowth tree structure
+
+    :Attributes:
+
+        root : Node
+            Represents the root node of the tree
+        summaries : dictionary
+            storing the nodes with same item name
+        info : dictionary
+            stores the support of items
+
+    :Methods:
+
+        addTransaction(transaction)
+            adds a transaction as a branch of the frequent-pattern tree
+        addConditionalPattern(prefixPaths, supportOfItems)
+            constructs the conditional tree from the prefix paths
+        conditionalPatterns(Node)
+            generates the conditional patterns from the tree for a specific node
+        conditionalTransactions(prefixPaths, Support)
+            takes the prefix paths of a node and their supports, extracts the frequent items from the prefix paths, and regenerates the prefix paths with only the frequent items
+        removeNode(Node)
+            removes the node from the tree once all the patterns for that node have been generated
+        generatePatterns(Node)
+            starts from the root node of the tree and mines the frequent patterns
+
+    """
+
+    def __init__(self) -> None:
+        self.root = _Node(None, {})
+        self.summaries = {}
+        self.info = {}
+
+    def addTransaction(self, transaction) -> None:
+        """
+        Adds a transaction into the tree
+
+        :param transaction: a single transaction of the database
+        :type transaction: list
+        :return: None
+        """
+        currentNode = self.root
+        for i in range(len(transaction)):
+            if transaction[i].item not in currentNode.children:
+                newNode = _Node(transaction[i].item, {})
+                l1 = i - 1
+                lp = []
+                while l1 >= 0:
+                    lp.append(transaction[l1].probability)
+                    l1 -= 1
+                if len(lp) == 0:
+                    newNode.probability = transaction[i].probability
+                else:
+                    newNode.probability = max(lp) * transaction[i].probability
+                currentNode.addChild(newNode)
+                if transaction[i].item in self.summaries:
+                    self.summaries[transaction[i].item].append(newNode)
+                else:
+                    self.summaries[transaction[i].item] = [newNode]
+                currentNode = newNode
+            else:
+                currentNode = currentNode.children[transaction[i].item]
+                l1 = i - 1
+                lp = []
+                while l1 >= 0:
+                    lp.append(transaction[l1].probability)
+                    l1 -= 1
+                if len(lp) == 0:
+                    currentNode.probability += transaction[i].probability
+                else:
+                    currentNode.probability += max(lp) * transaction[i].probability
+
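+    # Worked example (assumed probabilities, not part of the original source):
+    # for a transaction [a:0.8, b:0.5, c:0.4], addTransaction above assigns
+    #     node a: probability = 0.8                      (no preceding items)
+    #     node b: probability = max(0.8) * 0.5      = 0.40
+    #     node c: probability = max(0.8, 0.5) * 0.4 = 0.32
+    # i.e. each node stores a cap on the expected support of the path ending at
+    # that node, in the style of a PUF-tree.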
+    def addConditionalPattern(self, transaction, sup) -> None:
+        """
+        Constructs the conditional tree from the prefix paths
+
+        :param transaction: a prefix path of the conditional tree
+        :type transaction: list
+        :param sup: support of the prefix path taken at the last child of the path
+        :type sup: int
+        :return: None
+
+        """
+        # This method takes transaction, support and constructs the conditional tree
+        currentNode = self.root
+        for i in range(len(transaction)):
+            if transaction[i] not in currentNode.children:
+                newNode = _Node(transaction[i], {})
+                newNode.probability = sup
+                currentNode.addChild(newNode)
+                if transaction[i] in self.summaries:
+                    self.summaries[transaction[i]].append(newNode)
+                else:
+                    self.summaries[transaction[i]] = [newNode]
+                currentNode = newNode
+            else:
+                currentNode = currentNode.children[transaction[i]]
+                currentNode.probability += sup
+
+    def conditionalPatterns(self, alpha) -> tuple:
+        """
+        Generates all the conditional patterns of the respective node
+
+        :param alpha: the item whose conditional patterns are generated
+        :type alpha: _Node
+        :return: tuple of (conditional patterns, their supports, item-support info)
+        """
+        # This method generates conditional patterns of node by traversing the tree
+        finalPatterns = []
+        sup = []
+        for i in self.summaries[alpha]:
+            s = i.probability
+            set2 = []
+            while i.parent.item is not None:
+                set2.append(i.parent.item)
+                i = i.parent
+            if len(set2) > 0:
+                set2.reverse()
+                finalPatterns.append(set2)
+                sup.append(s)
+        finalPatterns, support, info = self.conditionalTransactions(finalPatterns, sup)
+        return finalPatterns, support, info
+
+    def removeNode(self, nodeValue) -> None:
+
+        """
+        Removing the node from tree
+
+        :param nodeValue : it represents the node in tree
+        :type nodeValue : node
+        :return: None
+        """
+
+        for i in self.summaries[nodeValue]:
+            del i.parent.children[nodeValue]
+
+    def conditionalTransactions(self, condPatterns, support) -> tuple:
+        """
+        It generates the conditional patterns with frequent items
+
+        :param condPatterns: conditional patterns generated from the conditionalPatterns method for the respective node
+        :type condPatterns: list
+        :param support: the supports of the conditional patterns in the tree
+        :type support: list
+        :return: tuple
+        """
+        global _expSup, _expWSup
+        pat = []
+        sup = []
+        count = {}
+        for i in range(len(condPatterns)):
+            for j in condPatterns[i]:
+                if j in count:
+                    count[j] += support[i]
+                else:
+                    count[j] = support[i]
+        updatedDict = {k: v for k, v in count.items() if v >= _expSup}
+        for index, p in enumerate(condPatterns):
+            p1 = [v for v in p if v in updatedDict]
+            trans = sorted(p1, key=lambda x: updatedDict[x], reverse=True)
+            if len(trans) > 0:
+                pat.append(trans)
+                sup.append(support[index])
+        return pat, sup, updatedDict
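+
+    # Worked example (assumed values, not part of the original source): given
+    # conditional patterns [['a', 'b'], ['a', 'c']] with supports [0.6, 0.5] and
+    # _expSup = 1.0, the accumulated counts are a: 1.1, b: 0.6, c: 0.5, so only
+    # 'a' survives the filter and both patterns shrink to ['a'] with supports
+    # 0.6 and 0.5 respectively.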
+
+    def generatePatterns(self, prefix) -> None:
+        """
+        Generates the patterns
+
+        :param prefix : forms the combination of items
+        :type prefix : list
+        :return: None
+        """
+
+        global _finalPatterns, _expSup, _expWSup, _weights
+        for i in sorted(self.summaries, key=lambda x: (self.info.get(x))):
+            pattern = prefix[:]
+            pattern.append(i)
+            weight = 0
+            for k in pattern:
+                weight = weight + _weights[k]
+            weight = weight/len(pattern)
+            if self.info.get(i) >= _expSup and self.info.get(i) * weight >= _expWSup:
+                _finalPatterns[tuple(pattern)] = self.info.get(i)
+                patterns, support, info = self.conditionalPatterns(i)
+                conditionalTree = _Tree()
+                conditionalTree.info = info.copy()
+                for pat in range(len(patterns)):
+                    conditionalTree.addConditionalPattern(patterns[pat], support[pat])
+                if len(patterns) > 0:
+                    conditionalTree.generatePatterns(pattern)
+            self.removeNode(i)
+
+
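+# Illustrative arithmetic (assumed weights, not part of the original source):
+# generatePatterns keeps a pattern only when its expected support reaches
+# _expSup AND its weighted expected support reaches _expWSup. With
+# _weights = {'a': 2, 'b': 1}, a pattern ('a', 'b') with expected support 3.0
+# has average weight (2 + 1) / 2 = 1.5, giving a weighted expected support of
+# 3.0 * 1.5 = 4.5.
+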
+[docs]
+class WUFIM(_ab._weightedFrequentPatterns):
+    """
+    :Description: WUFIM is an algorithm to discover weighted frequent patterns in an uncertain transactional database using a PUF-Tree.
+
+    :Reference: Efficient Mining of Weighted Frequent Itemsets in Uncertain Databases. In book: Machine Learning and Data Mining in Pattern Recognition. Chun-Wei Jerry Lin, Wensheng Gan, Philippe Fournier-Viger, Tzung-Pei Hong
+
+    :param iFile: str :
+        Name of the input file to mine the complete set of weighted uncertain frequent patterns
+    :param oFile: str :
+        Name of the output file to store the complete set of weighted uncertain frequent patterns
+    :param expSup: float :
+        Minimum expected support threshold
+    :param expWSup: float :
+        Minimum expected weighted support threshold
+    :param sep: str :
+        This variable is used to distinguish items from one another in a transaction. The default separator is tab space. However, the users can override the default separator.
+    :param wFile: str :
+        Name of the file containing the weight of each item.
+
+
+    :Attributes:
+
+        iFile : file
+            Name of the input file or path of the input file
+        wFile : file
+            Name of the weight file or path of the weight file
+        oFile : file
+            Name of the output file or path of the output file
+        expSup : float or int or str
+            The user can specify expSup either in count or proportion of database size.
+            If the program detects the data type of expSup is integer, then it treats expSup as expressed in count.
+            Otherwise, it will be treated as float.
+            Example: expSup=10 will be treated as integer, while expSup=10.0 will be treated as float
+        sep : str
+            This variable is used to distinguish items from one another in a transaction. The default separator is tab space or \t.
+            However, the users can override the default separator.
+        memoryUSS : float
+            To store the total amount of USS memory consumed by the program
+        memoryRSS : float
+            To store the total amount of RSS memory consumed by the program
+        startTime : float
+            To record the start time of the mining process
+        endTime : float
+            To record the completion time of the mining process
+        Database : list
+            To store the transactions of a database in a list
+        mapSupport : Dictionary
+            To maintain the information of items and their frequencies
+        lno : int
+            To represent the total number of transactions
+        tree : class
+            To represent the Tree class
+        itemSetCount : int
+            To represent the total number of patterns
+        finalPatterns : dict
+            To store the complete set of patterns
+
+    :Methods:
+
+        mine()
+            Mining process will start from here
+        getPatterns()
+            Complete set of patterns will be retrieved with this function
+        save(oFile)
+            Complete set of frequent patterns will be loaded in to an output file
+        getPatternsAsDataFrame()
+            Complete set of frequent patterns will be loaded in to a dataframe
+        getMemoryUSS()
+            Total amount of USS memory consumed by the mining process will be retrieved from this function
+        getMemoryRSS()
+            Total amount of RSS memory consumed by the mining process will be retrieved from this function
+        getRuntime()
+            Total amount of runtime taken by the mining process will be retrieved from this function
+        creatingItemSets(fileName)
+            Scans the dataset and stores it in a list format
+        frequentOneItem()
+            Extracts the one-length frequent patterns from the database
+        updateTransactions()
+            Updates the transactions by removing non-frequent items and sorting the database by decreasing item support
+        buildTree()
+            After updating the database, the remaining items will be added into the tree by setting the root node as null
+        convert()
+            To convert the user specified value
+
+    **Methods to execute code on terminal**
+    --------------------------------------------
+    .. code-block:: console
+
+      Format:
+
+      (.venv) $ python3 WUFIM.py <inputFile> <outputFile> <weightFile> <expSup> <expWSup>
+
+      Example Usage:
+
+      (.venv) $ python3 WUFIM.py sampleTDB.txt patterns.txt weights.txt 3 4
+
+    .. note:: expSup and expWSup will be considered in support count or frequency
+
+    **Importing this algorithm into a python program**
+    -----------------------------------------------------
+    .. code-block:: python
+
+            from PAMI.weightedUncertainFrequentPattern.basic import WUFIM as alg
+
+            obj = alg.WUFIM(iFile, wFile, expSup, expWSup)
+
+            obj.mine()
+
+            Patterns = obj.getPatterns()
+
+            print("Total number of Patterns:", len(Patterns))
+
+            obj.save(oFile)
+
+            Df = obj.getPatternsAsDataFrame()
+
+            memUSS = obj.getMemoryUSS()
+
+            print("Total Memory in USS:", memUSS)
+
+            memRSS = obj.getMemoryRSS()
+
+            print("Total Memory in RSS", memRSS)
+
+            run = obj.getRuntime()
+
+            print("Total ExecutionTime in seconds:", run)
+    """
+    _startTime = float()
+    _endTime = float()
+    _minSup = str()
+    _finalPatterns = {}
+    _iFile = " "
+    _wFile = " "
+    _oFile = " "
+    _sep = " "
+    _memoryUSS = float()
+    _memoryRSS = float()
+    _Database = []
+    _rank = {}
+    _expSup = float()
+    _expWSup = float()
+
+    def __init__(self, iFile, wFile, expSup, expWSup, sep='\t') -> None:
+        super().__init__(iFile, wFile, expSup, expWSup, sep)
+
+    def _creatingItemSets(self) -> None:
+        """
+        Scans the uncertain transactional dataset
+        :return: None
+        """
+        self._Database = []
+        if isinstance(self._iFile, _ab._pd.DataFrame):
+            uncertain, data = [], []
+            if self._iFile.empty:
+                print("its empty..")
+            i = self._iFile.columns.values.tolist()
+            if 'Transactions' in i:
+                data = self._iFile['Transactions'].tolist()
+            if 'uncertain' in i:
+                uncertain = self._iFile['uncertain'].tolist()
+            for k in range(len(data)):
+                tr = []
+                for j in range(len(data[k])):
+                    product = _Item(data[k][j], uncertain[k][j])
+                    tr.append(product)
+                self._Database.append(tr)
+
+        if isinstance(self._iFile, str):
+            if _ab._validators.url(self._iFile):
+                data = _ab._urlopen(self._iFile)
+                for line in data:
+                    line = line.decode("utf-8")
+                    line = line.strip()
+                    line = [i for i in line.split(':')]
+                    temp1 = [i.rstrip() for i in line[0].split(self._sep)]
+                    temp2 = [i.rstrip() for i in line[1].split(self._sep)]
+                    temp1 = [x for x in temp1 if x]
+                    temp2 = [x for x in temp2 if x]
+                    tr = []
+                    for i in range(len(temp1)):
+                        item = temp1[i]
+                        probability = float(temp2[i])
+                        product = _Item(item, probability)
+                        tr.append(product)
+                    self._Database.append(tr)
+            else:
+                try:
+                    with open(self._iFile, 'r') as f:
+                        for line in f:
+                            line = line.strip()
+                            line = [i for i in line.split(':')]
+                            temp1 = [i.rstrip() for i in line[0].split(self._sep)]
+                            temp2 = [i.rstrip() for i in line[1].split(self._sep)]
+                            temp1 = [x for x in temp1 if x]
+                            temp2 = [x for x in temp2 if x]
+                            tr = []
+                            for i in range(len(temp1)):
+                                item = temp1[i]
+                                probability = float(temp2[i])
+                                product = _Item(item, probability)
+                                tr.append(product)
+                            self._Database.append(tr)
+                except IOError:
+                    print("File Not Found")
+
+    def _scanningWeights(self) -> None:
+        """
+        Scans the item-weight file
+        :return: None
+        """
+        self._weights = {}
+        if isinstance(self._wFile, _ab._pd.DataFrame):
+            weights, data = [], []
+            if self._wFile.empty:
+                print("its empty..")
+            i = self._wFile.columns.values.tolist()
+            if 'items' in i:
+                data = self._wFile['items'].tolist()
+            if 'weights' in i:
+                weights = self._wFile['weights'].tolist()
+            for k in range(len(data)):
+                self._weights[data[k]] = int(float(weights[k]))
+
+        if isinstance(self._wFile, str):
+            if _ab._validators.url(self._wFile):
+                data = _ab._urlopen(self._wFile)
+                for line in data:
+                    line = line.decode("utf-8")
+                    temp = [i.rstrip() for i in line.split(self._sep)]
+                    temp = [x for x in temp if x]
+                    self._weights[temp[0]] = int(float(temp[1]))
+            else:
+                try:
+                    with open(self._wFile, 'r') as f:
+                        for line in f:
+                            temp = [i.rstrip() for i in line.split(self._sep)]
+                            temp = [x for x in temp if x]
+                            self._weights[temp[0]] = float(temp[1])
+                except IOError:
+                    print("File Not Found")
+
+    def _frequentOneItem(self) -> tuple:
+        """
+        Calculates the expected support of each item in the database, filters the items by the expSup and
+        expWSup thresholds, and assigns ranks to the surviving items by decreasing support.
+
+        :return: tuple of (frequent items with their supports, items ordered by decreasing support)
+        """
+        mapSupport = {}
+        for i in self._Database:
+            for j in i:
+                if j.item not in mapSupport:
+                    if self._weights.get(j.item) is not None:
+                        mapSupport[j.item] = [j.probability, self._weights[j.item]]
+                else:
+                    mapSupport[j.item][0] += j.probability
+        mapSupport = {k: v[0] for k, v in mapSupport.items() if v[0] >= self._expSup and v[0] * v[1] >= self._expWSup}
+        plist = [k for k, v in sorted(mapSupport.items(), key=lambda x: x[1], reverse=True)]
+        self.rank = {item: index for index, item in enumerate(plist)}
+        return mapSupport, plist
+
+    @staticmethod
+    def _buildTree(data, info) -> _Tree:
+        """
+        Constructs the main tree from the updated database, with the root node set to null
+
+        :param data : the updated database
+        :type data : list
+        :param info : the support of each item
+        :type info : dictionary
+        :return: tree
+        """
+        rootNode = _Tree()
+        rootNode.info = info.copy()
+        for i in range(len(data)):
+            rootNode.addTransaction(data[i])
+        return rootNode
+
+    def _updateTransactions(self, dict1) -> list:
+        """
+        Removes the items which are not frequent from the database and sorts each transaction by item rank
+
+        :param dict1 : frequent items with support
+        :type dict1 : dictionary
+        :return: list
+        """
+        list1 = []
+        for tr in self._Database:
+            list2 = []
+            for i in range(0, len(tr)):
+                if tr[i].item in dict1:
+                    list2.append(tr[i])
+            if len(list2) >= 2:
+                basket = list2
+                basket.sort(key=lambda val: self.rank[val.item])
+                list2 = basket
+                list1.append(list2)
+        return list1
+
+    @staticmethod
+    def _check(i, x) -> int:
+        """
+        To check the presence of a pattern in a transaction
+
+        :param x: it represents the pattern
+        :type x : list
+        :param i : represents an uncertain transaction
+        :type i : list
+        :return: 1 if every item of the pattern occurs in the transaction, otherwise 0
+        """
+        for m in x:
+            k = 0
+            for n in i:
+                if m == n.item:
+                    k += 1
+            if k == 0:
+                return 0
+        return 1
+
+    def _convert(self, value) -> float:
+        """
+        To convert the type of the user specified expSup value
+
+        :param value: user specified expSup value
+        :return: converted expSup value
+        """
+        if type(value) is int:
+            value = int(value)
+        if type(value) is float:
+            value = (len(self._Database) * value)
+        if type(value) is str:
+            if '.' in value:
+                value = (len(self._Database) * float(value))
+            else:
+                value = int(value)
+        return value
+
+    def _removeFalsePositives(self) -> None:
+        """
+        To remove the false positive patterns from the generated frequent patterns.
+
+        :return: None
+        """
+        global _finalPatterns
+        periods = {}
+        for i in self._Database:
+            for x, y in _finalPatterns.items():
+                if len(x) == 1:
+                    periods[x] = y
+                else:
+                    s = 1
+                    check = self._check(i, x)
+                    if check == 1:
+                        for j in i:
+                            if j.item in x:
+                                s *= j.probability
+                        if x in periods:
+                            periods[x] += s
+                        else:
+                            periods[x] = s
+        for x, y in periods.items():
+            weight = 0
+            for i in x:
+                weight += self._weights[i]
+            weight = weight / len(x)
+            if weight * y >= self._expWSup:
+                sample = str()
+                for i in x:
+                    sample = sample + i + "\t"
+                self._finalPatterns[sample] = y
+
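+    # Worked example (assumed values, not part of the original source):
+    # _removeFalsePositives recomputes the exact expected support of each
+    # candidate as a sum of per-transaction probability products. For a pattern
+    # ('a', 'b') occurring in two transactions with member probabilities
+    # (0.8, 0.5) and (0.6, 0.9), the exact expected support is
+    # 0.8 * 0.5 + 0.6 * 0.9 = 0.94, which is then re-checked against _expWSup
+    # after weighting.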
+
+[docs]
+    @deprecated("It is recommended to use mine() instead of startMine() for mining process")
+    def startMine(self) -> None:
+        """
+        startMine() method where the patterns are mined by constructing the tree and the false patterns are removed by computing the exact support of each pattern.
+        """
+        self.mine()
+ + +
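+    # Summary (added comment, not part of the original source): mine() below
+    # proceeds in six steps: scan the uncertain database, scan the weight file,
+    # find the weighted-frequent single items, prune and reorder every
+    # transaction, build the tree and recursively generate candidates, and
+    # finally discard false positives by recomputing exact expected supports.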
+
+[docs]
+    def mine(self) -> None:
+        """
+        mine() method where the patterns are mined by constructing the tree and the false patterns are removed by computing the exact support of each pattern
+        """
+        global _expSup, _expWSup, _weights, _finalPatterns
+        self._startTime = _ab._time.time()
+        self._Database, self._weights = [], {}
+        self._creatingItemSets()
+        self._scanningWeights()
+        _weights = self._weights
+        self._expSup = float(self._expSup)
+        self._expWSup = float(self._expWSup)
+        _expSup = self._expSup
+        _expWSup = self._expWSup
+        self._finalPatterns = {}
+        mapSupport, plist = self._frequentOneItem()
+        self.Database1 = self._updateTransactions(mapSupport)
+        info = {k: v for k, v in mapSupport.items()}
+        Tree1 = self._buildTree(self.Database1, info)
+        Tree1.generatePatterns([])
+        self._removeFalsePositives()
+        print("Weighted Frequent patterns were generated successfully using basic algorithm")
+        self._endTime = _ab._time.time()
+        process = _ab._psutil.Process(_ab._os.getpid())
+        self._memoryUSS = process.memory_full_info().uss
+        self._memoryRSS = process.memory_info().rss
+ + +
+[docs] + def getMemoryUSS(self) -> float: + """ + Total amount of USS memory consumed by the mining process will be retrieved from this function + :return: returning USS memory consumed by the mining process + :rtype: float + """ + + return self._memoryUSS
+ + +
+
+[docs]
+    def getMemoryRSS(self) -> float:
+        """
+        Total amount of RSS memory consumed by the mining process will be retrieved from this function
+
+        :return: returning RSS memory consumed by the mining process
+        :rtype: float
+        """
+        return self._memoryRSS
+ + +
+[docs] + def getRuntime(self) -> float: + """ + Calculating the total amount of runtime taken by the mining process + :return: returning total amount of runtime taken by the mining process + :rtype: float + """ + + return self._endTime - self._startTime
+ + +
+[docs] + def getPatternsAsDataFrame(self) -> pd.DataFrame: + """ + Storing final frequent patterns in a dataframe + :return: returning frequent patterns in a dataframe + :rtype: pd.DataFrame + """ + dataframe = {} + data = [] + for a, b in self._finalPatterns.items(): + s = str() + for i in a: + s = s + i + " " + data.append([s, b]) + dataframe = _ab._pd.DataFrame(data, columns=['Patterns', 'Support']) + return dataframe
+ + +
+[docs] + def save(self, outFile: str) -> None: + """ + Complete set of frequent patterns will be loaded in to an output file + + :param outFile: Specify name of the output file + :type outFile: csv file + :return: None + """ + self.oFile = outFile + writer = open(self.oFile, 'w+') + for x, y in self._finalPatterns.items(): + s = str() + for i in x: + s = s + i + "\t" + s1 = s.strip() + ":" + str(y) + writer.write("%s \n" % s1)
+ + +
+[docs] + def getPatterns(self) -> dict: + """ + Function to send the set of frequent patterns after completion of the mining process + :return: returning frequent patterns + :rtype: dict + """ + return self._finalPatterns
+ + +
+
+[docs]
+    def printResults(self) -> None:
+        """
+        This function is used to print the results
+        :return: None
+        """
+        print("Total number of Weighted Uncertain Frequent Patterns:", len(self.getPatterns()))
+        print("Total Memory in USS:", self.getMemoryUSS())
+        print("Total Memory in RSS", self.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", self.getRuntime())
+
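+# Example invocation (hypothetical file names, not part of the original source):
+#
+#     python3 WUFIM.py uncertainDB.txt patterns.txt weights.txt 300 500
+#
+# where the positional arguments map to <inputFile> <outputFile> <weightFile>
+# <expSup> <expWSup>, matching the argument handling in the __main__ block below.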
+
+
+
+if __name__ == "__main__":
+    _ap = str()
+    if len(_ab._sys.argv) == 6 or len(_ab._sys.argv) == 7:
+        if len(_ab._sys.argv) == 7:
+            _ap = WUFIM(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5], _ab._sys.argv[6])
+        if len(_ab._sys.argv) == 6:
+            _ap = WUFIM(_ab._sys.argv[1], _ab._sys.argv[3], _ab._sys.argv[4], _ab._sys.argv[5])
+        _ap.mine()
+        print("Total number of Weighted Uncertain Frequent Patterns:", len(_ap.getPatterns()))
+        _ap.save(_ab._sys.argv[2])
+        print("Total Memory in USS:", _ap.getMemoryUSS())
+        print("Total Memory in RSS", _ap.getMemoryRSS())
+        print("Total ExecutionTime in seconds:", _ap.getRuntime())
+    else:
+        print("Error! The number of input parameters does not match the total number of parameters provided")
+
+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_modules/index.html b/sphinx/_build/html/_modules/index.html new file mode 100644 index 000000000..ee488e009 --- /dev/null +++ b/sphinx/_build/html/_modules/index.html @@ -0,0 +1,261 @@ + + + + + + Overview: module code — PAMI 2024.04.23 documentation + + + + + + + + + + + + + + + +

All modules for which code is available

+ + + + \ No newline at end of file diff --git a/sphinx/_build/html/_sources/PAMI.AssociationRules.basic.rst.txt b/sphinx/_build/html/_sources/PAMI.AssociationRules.basic.rst.txt new file mode 100644 index 000000000..fb617f7c6 --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.AssociationRules.basic.rst.txt @@ -0,0 +1,53 @@ +PAMI.AssociationRules.basic package +=================================== + +Submodules +---------- + +PAMI.AssociationRules.basic.ARWithConfidence module +--------------------------------------------------- + +.. automodule:: PAMI.AssociationRules.basic.ARWithConfidence + :members: + :undoc-members: + :show-inheritance: + +PAMI.AssociationRules.basic.ARWithLeverage module +------------------------------------------------- + +.. automodule:: PAMI.AssociationRules.basic.ARWithLeverage + :members: + :undoc-members: + :show-inheritance: + +PAMI.AssociationRules.basic.ARWithLift module +--------------------------------------------- + +.. automodule:: PAMI.AssociationRules.basic.ARWithLift + :members: + :undoc-members: + :show-inheritance: + +PAMI.AssociationRules.basic.RuleMiner module +-------------------------------------------- + +.. automodule:: PAMI.AssociationRules.basic.RuleMiner + :members: + :undoc-members: + :show-inheritance: + +PAMI.AssociationRules.basic.abstract module +------------------------------------------- + +.. automodule:: PAMI.AssociationRules.basic.abstract + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: PAMI.AssociationRules.basic + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.AssociationRules.rst.txt b/sphinx/_build/html/_sources/PAMI.AssociationRules.rst.txt new file mode 100644 index 000000000..8a66244e7 --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.AssociationRules.rst.txt @@ -0,0 +1,18 @@ +PAMI.AssociationRules package +============================= + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + PAMI.AssociationRules.basic + +Module contents +--------------- + +.. automodule:: PAMI.AssociationRules + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.correlatedPattern.basic.rst.txt b/sphinx/_build/html/_sources/PAMI.correlatedPattern.basic.rst.txt new file mode 100644 index 000000000..653a6ac01 --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.correlatedPattern.basic.rst.txt @@ -0,0 +1,37 @@ +PAMI.correlatedPattern.basic package +==================================== + +Submodules +---------- + +PAMI.correlatedPattern.basic.CoMine module +------------------------------------------ + +.. automodule:: PAMI.correlatedPattern.basic.CoMine + :members: + :undoc-members: + :show-inheritance: + +PAMI.correlatedPattern.basic.CoMinePlus module +---------------------------------------------- + +.. automodule:: PAMI.correlatedPattern.basic.CoMinePlus + :members: + :undoc-members: + :show-inheritance: + +PAMI.correlatedPattern.basic.abstract module +-------------------------------------------- + +.. automodule:: PAMI.correlatedPattern.basic.abstract + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: PAMI.correlatedPattern.basic + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.correlatedPattern.rst.txt b/sphinx/_build/html/_sources/PAMI.correlatedPattern.rst.txt new file mode 100644 index 000000000..e3df455fd --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.correlatedPattern.rst.txt @@ -0,0 +1,18 @@ +PAMI.correlatedPattern package +============================== + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + PAMI.correlatedPattern.basic + +Module contents +--------------- + +.. automodule:: PAMI.correlatedPattern + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.coveragePattern.basic.rst.txt b/sphinx/_build/html/_sources/PAMI.coveragePattern.basic.rst.txt new file mode 100644 index 000000000..cca5ccc99 --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.coveragePattern.basic.rst.txt @@ -0,0 +1,37 @@ +PAMI.coveragePattern.basic package +================================== + +Submodules +---------- + +PAMI.coveragePattern.basic.CMine module +--------------------------------------- + +.. automodule:: PAMI.coveragePattern.basic.CMine + :members: + :undoc-members: + :show-inheritance: + +PAMI.coveragePattern.basic.CPPG module +-------------------------------------- + +.. automodule:: PAMI.coveragePattern.basic.CPPG + :members: + :undoc-members: + :show-inheritance: + +PAMI.coveragePattern.basic.abstract module +------------------------------------------ + +.. automodule:: PAMI.coveragePattern.basic.abstract + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: PAMI.coveragePattern.basic + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.coveragePattern.rst.txt b/sphinx/_build/html/_sources/PAMI.coveragePattern.rst.txt new file mode 100644 index 000000000..0d9d0c38c --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.coveragePattern.rst.txt @@ -0,0 +1,18 @@ +PAMI.coveragePattern package +============================ + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + PAMI.coveragePattern.basic + +Module contents +--------------- + +.. automodule:: PAMI.coveragePattern + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.extras.DF2DB.rst.txt b/sphinx/_build/html/_sources/PAMI.extras.DF2DB.rst.txt new file mode 100644 index 000000000..61310517c --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.extras.DF2DB.rst.txt @@ -0,0 +1,77 @@ +PAMI.extras.DF2DB package +========================= + +Submodules +---------- + +PAMI.extras.DF2DB.DF2DB module +------------------------------ + +.. automodule:: PAMI.extras.DF2DB.DF2DB + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.DF2DB.DF2DBPlus module +---------------------------------- + +.. automodule:: PAMI.extras.DF2DB.DF2DBPlus + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.DF2DB.DenseFormatDF module +-------------------------------------- + +.. automodule:: PAMI.extras.DF2DB.DenseFormatDF + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.DF2DB.SparseFormatDF module +--------------------------------------- + +.. automodule:: PAMI.extras.DF2DB.SparseFormatDF + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.DF2DB.createTDB module +---------------------------------- + +.. 
automodule:: PAMI.extras.DF2DB.createTDB + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.DF2DB.denseDF2DBPlus module +--------------------------------------- + +.. automodule:: PAMI.extras.DF2DB.denseDF2DBPlus + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.DF2DB.denseDF2DB\_dump module +----------------------------------------- + +.. automodule:: PAMI.extras.DF2DB.denseDF2DB_dump + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.DF2DB.sparseDF2DBPlus module +---------------------------------------- + +.. automodule:: PAMI.extras.DF2DB.sparseDF2DBPlus + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: PAMI.extras.DF2DB + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.extras.calculateMISValues.rst.txt b/sphinx/_build/html/_sources/PAMI.extras.calculateMISValues.rst.txt new file mode 100644 index 000000000..5eeec51d7 --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.extras.calculateMISValues.rst.txt @@ -0,0 +1,29 @@ +PAMI.extras.calculateMISValues package +====================================== + +Submodules +---------- + +PAMI.extras.calculateMISValues.usingBeta module +----------------------------------------------- + +.. automodule:: PAMI.extras.calculateMISValues.usingBeta + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.calculateMISValues.usingSD module +--------------------------------------------- + +.. automodule:: PAMI.extras.calculateMISValues.usingSD + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: PAMI.extras.calculateMISValues + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.extras.dbStats.rst.txt b/sphinx/_build/html/_sources/PAMI.extras.dbStats.rst.txt new file mode 100644 index 000000000..01e7e7efd --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.extras.dbStats.rst.txt @@ -0,0 +1,77 @@ +PAMI.extras.dbStats package +=========================== + +Submodules +---------- + +PAMI.extras.dbStats.FuzzyDatabase module +---------------------------------------- + +.. automodule:: PAMI.extras.dbStats.FuzzyDatabase + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.dbStats.MultipleTimeSeriesFuzzyDatabaseStats module +--------------------------------------------------------------- + +.. automodule:: PAMI.extras.dbStats.MultipleTimeSeriesFuzzyDatabaseStats + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.dbStats.SequentialDatabase module +--------------------------------------------- + +.. automodule:: PAMI.extras.dbStats.SequentialDatabase + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.dbStats.TemporalDatabase module +------------------------------------------- + +.. automodule:: PAMI.extras.dbStats.TemporalDatabase + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.dbStats.TransactionalDatabase module +------------------------------------------------ + +.. automodule:: PAMI.extras.dbStats.TransactionalDatabase + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.dbStats.UncertainTemporalDatabase module +---------------------------------------------------- + +.. automodule:: PAMI.extras.dbStats.UncertainTemporalDatabase + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.dbStats.UncertainTransactionalDatabase module +--------------------------------------------------------- + +.. 
automodule:: PAMI.extras.dbStats.UncertainTransactionalDatabase + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.dbStats.UtilityDatabase module +------------------------------------------ + +.. automodule:: PAMI.extras.dbStats.UtilityDatabase + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: PAMI.extras.dbStats + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.extras.fuzzyTransformation.rst.txt b/sphinx/_build/html/_sources/PAMI.extras.fuzzyTransformation.rst.txt new file mode 100644 index 000000000..f9b453c91 --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.extras.fuzzyTransformation.rst.txt @@ -0,0 +1,45 @@ +PAMI.extras.fuzzyTransformation package +======================================= + +Submodules +---------- + +PAMI.extras.fuzzyTransformation.abstract module +----------------------------------------------- + +.. automodule:: PAMI.extras.fuzzyTransformation.abstract + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.fuzzyTransformation.temporalToFuzzy module +------------------------------------------------------ + +.. automodule:: PAMI.extras.fuzzyTransformation.temporalToFuzzy + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.fuzzyTransformation.transactionalToFuzzy module +----------------------------------------------------------- + +.. automodule:: PAMI.extras.fuzzyTransformation.transactionalToFuzzy + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.fuzzyTransformation.utilityToFuzzy module +----------------------------------------------------- + +.. automodule:: PAMI.extras.fuzzyTransformation.utilityToFuzzy + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: PAMI.extras.fuzzyTransformation + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.extras.generateDatabase.rst.txt b/sphinx/_build/html/_sources/PAMI.extras.generateDatabase.rst.txt new file mode 100644 index 000000000..09a390549 --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.extras.generateDatabase.rst.txt @@ -0,0 +1,37 @@ +PAMI.extras.generateDatabase package +==================================== + +Submodules +---------- + +PAMI.extras.generateDatabase.generateSpatioTemporalDatabase module +------------------------------------------------------------------ + +.. automodule:: PAMI.extras.generateDatabase.generateSpatioTemporalDatabase + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.generateDatabase.generateTemporalDatabase module +------------------------------------------------------------ + +.. automodule:: PAMI.extras.generateDatabase.generateTemporalDatabase + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.generateDatabase.generateTransactionalDatabase module +----------------------------------------------------------------- + +.. automodule:: PAMI.extras.generateDatabase.generateTransactionalDatabase + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: PAMI.extras.generateDatabase + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.extras.graph.rst.txt b/sphinx/_build/html/_sources/PAMI.extras.graph.rst.txt new file mode 100644 index 000000000..f909c8001 --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.extras.graph.rst.txt @@ -0,0 +1,61 @@ +PAMI.extras.graph package +========================= + +Submodules +---------- + +PAMI.extras.graph.DF2Fig module +------------------------------- + +.. automodule:: PAMI.extras.graph.DF2Fig + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.graph.DF2Tex module +------------------------------- + +.. automodule:: PAMI.extras.graph.DF2Tex + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.graph.plotLineGraphFromDictionary module +---------------------------------------------------- + +.. automodule:: PAMI.extras.graph.plotLineGraphFromDictionary + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.graph.plotLineGraphsFromDataFrame module +---------------------------------------------------- + +.. automodule:: PAMI.extras.graph.plotLineGraphsFromDataFrame + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.graph.visualizeFuzzyPatterns module +----------------------------------------------- + +.. automodule:: PAMI.extras.graph.visualizeFuzzyPatterns + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.graph.visualizePatterns module +------------------------------------------ + +.. automodule:: PAMI.extras.graph.visualizePatterns + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: PAMI.extras.graph + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.extras.image2Database.rst.txt b/sphinx/_build/html/_sources/PAMI.extras.image2Database.rst.txt new file mode 100644 index 000000000..18d5f71c9 --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.extras.image2Database.rst.txt @@ -0,0 +1,10 @@ +PAMI.extras.image2Database package +================================== + +Module contents +--------------- + +.. automodule:: PAMI.extras.image2Database + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.extras.imageProcessing.rst.txt b/sphinx/_build/html/_sources/PAMI.extras.imageProcessing.rst.txt new file mode 100644 index 000000000..f4e358f5e --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.extras.imageProcessing.rst.txt @@ -0,0 +1,21 @@ +PAMI.extras.imageProcessing package +=================================== + +Submodules +---------- + +PAMI.extras.imageProcessing.imagery2Databases module +---------------------------------------------------- + +.. automodule:: PAMI.extras.imageProcessing.imagery2Databases + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: PAMI.extras.imageProcessing + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.extras.messaging.rst.txt b/sphinx/_build/html/_sources/PAMI.extras.messaging.rst.txt new file mode 100644 index 000000000..4c61343fa --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.extras.messaging.rst.txt @@ -0,0 +1,29 @@ +PAMI.extras.messaging package +============================= + +Submodules +---------- + +PAMI.extras.messaging.discord module +------------------------------------ + +.. 
automodule:: PAMI.extras.messaging.discord + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.messaging.gmail module +---------------------------------- + +.. automodule:: PAMI.extras.messaging.gmail + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: PAMI.extras.messaging + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.extras.neighbours.rst.txt b/sphinx/_build/html/_sources/PAMI.extras.neighbours.rst.txt new file mode 100644 index 000000000..6aa87be9a --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.extras.neighbours.rst.txt @@ -0,0 +1,37 @@ +PAMI.extras.neighbours package +============================== + +Submodules +---------- + +PAMI.extras.neighbours.findNeighborsUsingEuclideanDistanceforPointInfo module +----------------------------------------------------------------------------- + +.. automodule:: PAMI.extras.neighbours.findNeighborsUsingEuclideanDistanceforPointInfo + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.neighbours.findNeighboursUsingEuclidean module +---------------------------------------------------------- + +.. automodule:: PAMI.extras.neighbours.findNeighboursUsingEuclidean + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.neighbours.findNeighboursUsingGeodesic module +--------------------------------------------------------- + +.. automodule:: PAMI.extras.neighbours.findNeighboursUsingGeodesic + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: PAMI.extras.neighbours + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.extras.rst.txt b/sphinx/_build/html/_sources/PAMI.extras.rst.txt new file mode 100644 index 000000000..d647badf3 --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.extras.rst.txt @@ -0,0 +1,90 @@ +PAMI.extras package +=================== + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + PAMI.extras.DF2DB + PAMI.extras.calculateMISValues + PAMI.extras.dbStats + PAMI.extras.fuzzyTransformation + PAMI.extras.generateDatabase + PAMI.extras.graph + PAMI.extras.image2Database + PAMI.extras.imageProcessing + PAMI.extras.messaging + PAMI.extras.neighbours + PAMI.extras.sampleDatasets + PAMI.extras.stats + PAMI.extras.syntheticDataGenerator + PAMI.extras.visualize + +Submodules +---------- + +PAMI.extras.convertMultiTSIntoFuzzy module +------------------------------------------ + +.. automodule:: PAMI.extras.convertMultiTSIntoFuzzy + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.generateLatexGraphFile module +----------------------------------------- + +.. automodule:: PAMI.extras.generateLatexGraphFile + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.plotPointOnMap module +--------------------------------- + +.. automodule:: PAMI.extras.plotPointOnMap + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.plotPointOnMap\_dump module +--------------------------------------- + +.. automodule:: PAMI.extras.plotPointOnMap_dump + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.scatterPlotSpatialPoints module +------------------------------------------- + +.. automodule:: PAMI.extras.scatterPlotSpatialPoints + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.topKPatterns module +------------------------------- + +.. 
automodule:: PAMI.extras.topKPatterns + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.uncertaindb\_convert module +--------------------------------------- + +.. automodule:: PAMI.extras.uncertaindb_convert + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: PAMI.extras + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.extras.sampleDatasets.rst.txt b/sphinx/_build/html/_sources/PAMI.extras.sampleDatasets.rst.txt new file mode 100644 index 000000000..e8b31dbf6 --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.extras.sampleDatasets.rst.txt @@ -0,0 +1,10 @@ +PAMI.extras.sampleDatasets package +================================== + +Module contents +--------------- + +.. automodule:: PAMI.extras.sampleDatasets + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.extras.stats.rst.txt b/sphinx/_build/html/_sources/PAMI.extras.stats.rst.txt new file mode 100644 index 000000000..2bba8218c --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.extras.stats.rst.txt @@ -0,0 +1,53 @@ +PAMI.extras.stats package +========================= + +Submodules +---------- + +PAMI.extras.stats.TransactionalDatabase module +---------------------------------------------- + +.. automodule:: PAMI.extras.stats.TransactionalDatabase + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.stats.graphDatabase module +-------------------------------------- + +.. automodule:: PAMI.extras.stats.graphDatabase + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.stats.sequentialDatabase module +------------------------------------------- + +.. automodule:: PAMI.extras.stats.sequentialDatabase + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.stats.temporalDatabase module +----------------------------------------- + +.. automodule:: PAMI.extras.stats.temporalDatabase + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.stats.utilityDatabase module +---------------------------------------- + +.. automodule:: PAMI.extras.stats.utilityDatabase + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: PAMI.extras.stats + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.extras.syntheticDataGenerator.rst.txt b/sphinx/_build/html/_sources/PAMI.extras.syntheticDataGenerator.rst.txt new file mode 100644 index 000000000..5a95004c5 --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.extras.syntheticDataGenerator.rst.txt @@ -0,0 +1,189 @@ +PAMI.extras.syntheticDataGenerator package +========================================== + +Submodules +---------- + +PAMI.extras.syntheticDataGenerator.TemporalDatabase module +---------------------------------------------------------- + +.. automodule:: PAMI.extras.syntheticDataGenerator.TemporalDatabase + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.syntheticDataGenerator.TransactionalDatabase module +--------------------------------------------------------------- + +.. automodule:: PAMI.extras.syntheticDataGenerator.TransactionalDatabase + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialTemporal module +------------------------------------------------------------------------------- + +.. 
automodule:: PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialTemporal + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialTransactions module +----------------------------------------------------------------------------------- + +.. automodule:: PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialTransactions + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialUncertainTransaction module +------------------------------------------------------------------------------------------- + +.. automodule:: PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialUncertainTransaction + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.syntheticDataGenerator.createSyntheticTemporal module +----------------------------------------------------------------- + +.. automodule:: PAMI.extras.syntheticDataGenerator.createSyntheticTemporal + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.syntheticDataGenerator.createSyntheticTransactions module +--------------------------------------------------------------------- + +.. automodule:: PAMI.extras.syntheticDataGenerator.createSyntheticTransactions + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.syntheticDataGenerator.createSyntheticUncertainTemporal module +-------------------------------------------------------------------------- + +.. automodule:: PAMI.extras.syntheticDataGenerator.createSyntheticUncertainTemporal + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.syntheticDataGenerator.createSyntheticUncertainTransactions module +------------------------------------------------------------------------------ + +.. automodule:: PAMI.extras.syntheticDataGenerator.createSyntheticUncertainTransactions + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.syntheticDataGenerator.createSyntheticUtility module +---------------------------------------------------------------- + +.. automodule:: PAMI.extras.syntheticDataGenerator.createSyntheticUtility + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.syntheticDataGenerator.fuzzyDatabase module +------------------------------------------------------- + +.. automodule:: PAMI.extras.syntheticDataGenerator.fuzzyDatabase + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.syntheticDataGenerator.generateTemporal module +---------------------------------------------------------- + +.. automodule:: PAMI.extras.syntheticDataGenerator.generateTemporal + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.syntheticDataGenerator.generateTransactional module +--------------------------------------------------------------- + +.. automodule:: PAMI.extras.syntheticDataGenerator.generateTransactional + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.syntheticDataGenerator.generateUncertainTemporal module +------------------------------------------------------------------- + +.. automodule:: PAMI.extras.syntheticDataGenerator.generateUncertainTemporal + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.syntheticDataGenerator.generateUncertainTransactional module +------------------------------------------------------------------------ + +.. 
automodule:: PAMI.extras.syntheticDataGenerator.generateUncertainTransactional + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.syntheticDataGenerator.generateUtilityTemporal module +----------------------------------------------------------------- + +.. automodule:: PAMI.extras.syntheticDataGenerator.generateUtilityTemporal + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.syntheticDataGenerator.generateUtilityTransactional module +---------------------------------------------------------------------- + +.. automodule:: PAMI.extras.syntheticDataGenerator.generateUtilityTransactional + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.syntheticDataGenerator.georeferencedTemporalDatabase module +----------------------------------------------------------------------- + +.. automodule:: PAMI.extras.syntheticDataGenerator.georeferencedTemporalDatabase + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.syntheticDataGenerator.georeferencedTransactionalDatabase module +---------------------------------------------------------------------------- + +.. automodule:: PAMI.extras.syntheticDataGenerator.georeferencedTransactionalDatabase + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.syntheticDataGenerator.syntheticUtilityDatabase module +------------------------------------------------------------------ + +.. automodule:: PAMI.extras.syntheticDataGenerator.syntheticUtilityDatabase + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.syntheticDataGenerator.temporalDatabaseGen module +------------------------------------------------------------- + +.. automodule:: PAMI.extras.syntheticDataGenerator.temporalDatabaseGen + :members: + :undoc-members: + :show-inheritance: + +PAMI.extras.syntheticDataGenerator.utilityDatabase module +--------------------------------------------------------- + +.. automodule:: PAMI.extras.syntheticDataGenerator.utilityDatabase + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: PAMI.extras.syntheticDataGenerator + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.extras.visualize.rst.txt b/sphinx/_build/html/_sources/PAMI.extras.visualize.rst.txt new file mode 100644 index 000000000..c80357bb7 --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.extras.visualize.rst.txt @@ -0,0 +1,21 @@ +PAMI.extras.visualize package +============================= + +Submodules +---------- + +PAMI.extras.visualize.graphs module +----------------------------------- + +.. automodule:: PAMI.extras.visualize.graphs + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: PAMI.extras.visualize + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.faultTolerantFrequentPattern.basic.rst.txt b/sphinx/_build/html/_sources/PAMI.faultTolerantFrequentPattern.basic.rst.txt new file mode 100644 index 000000000..7246ca90b --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.faultTolerantFrequentPattern.basic.rst.txt @@ -0,0 +1,37 @@ +PAMI.faultTolerantFrequentPattern.basic package +=============================================== + +Submodules +---------- + +PAMI.faultTolerantFrequentPattern.basic.FTApriori module +-------------------------------------------------------- + +.. 
automodule:: PAMI.faultTolerantFrequentPattern.basic.FTApriori + :members: + :undoc-members: + :show-inheritance: + +PAMI.faultTolerantFrequentPattern.basic.FTFPGrowth module +--------------------------------------------------------- + +.. automodule:: PAMI.faultTolerantFrequentPattern.basic.FTFPGrowth + :members: + :undoc-members: + :show-inheritance: + +PAMI.faultTolerantFrequentPattern.basic.abstract module +------------------------------------------------------- + +.. automodule:: PAMI.faultTolerantFrequentPattern.basic.abstract + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: PAMI.faultTolerantFrequentPattern.basic + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.faultTolerantFrequentPattern.rst.txt b/sphinx/_build/html/_sources/PAMI.faultTolerantFrequentPattern.rst.txt new file mode 100644 index 000000000..57170bb19 --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.faultTolerantFrequentPattern.rst.txt @@ -0,0 +1,18 @@ +PAMI.faultTolerantFrequentPattern package +========================================= + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + PAMI.faultTolerantFrequentPattern.basic + +Module contents +--------------- + +.. automodule:: PAMI.faultTolerantFrequentPattern + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.frequentPattern.basic.rst.txt b/sphinx/_build/html/_sources/PAMI.frequentPattern.basic.rst.txt new file mode 100644 index 000000000..70a19c006 --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.frequentPattern.basic.rst.txt @@ -0,0 +1,61 @@ +PAMI.frequentPattern.basic package +================================== + +Submodules +---------- + +PAMI.frequentPattern.basic.Apriori module +----------------------------------------- + +.. automodule:: PAMI.frequentPattern.basic.Apriori + :members: + :undoc-members: + :show-inheritance: + +PAMI.frequentPattern.basic.ECLAT module +--------------------------------------- + +.. automodule:: PAMI.frequentPattern.basic.ECLAT + :members: + :undoc-members: + :show-inheritance: + +PAMI.frequentPattern.basic.ECLATDiffset module +---------------------------------------------- + +.. automodule:: PAMI.frequentPattern.basic.ECLATDiffset + :members: + :undoc-members: + :show-inheritance: + +PAMI.frequentPattern.basic.ECLATbitset module +--------------------------------------------- + +.. automodule:: PAMI.frequentPattern.basic.ECLATbitset + :members: + :undoc-members: + :show-inheritance: + +PAMI.frequentPattern.basic.FPGrowth module +------------------------------------------ + +.. automodule:: PAMI.frequentPattern.basic.FPGrowth + :members: + :undoc-members: + :show-inheritance: + +PAMI.frequentPattern.basic.abstract module +------------------------------------------ + +.. automodule:: PAMI.frequentPattern.basic.abstract + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
diff --git a/sphinx/_build/html/_sources/PAMI.frequentPattern.closed.rst.txt b/sphinx/_build/html/_sources/PAMI.frequentPattern.closed.rst.txt
new file mode 100644
index 000000000..813c15e50
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.frequentPattern.closed.rst.txt
@@ -0,0 +1,29 @@
+PAMI.frequentPattern.closed package
+===================================
+
+Submodules
+----------
+
+PAMI.frequentPattern.closed.CHARM module
+----------------------------------------
+
+.. automodule:: PAMI.frequentPattern.closed.CHARM
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.frequentPattern.closed.abstract module
+-------------------------------------------
+
+.. automodule:: PAMI.frequentPattern.closed.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.frequentPattern.closed
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.frequentPattern.cuda.rst.txt b/sphinx/_build/html/_sources/PAMI.frequentPattern.cuda.rst.txt
new file mode 100644
index 000000000..42555ab52
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.frequentPattern.cuda.rst.txt
@@ -0,0 +1,77 @@
+PAMI.frequentPattern.cuda package
+=================================
+
+Submodules
+----------
+
+PAMI.frequentPattern.cuda.abstract module
+-----------------------------------------
+
+.. automodule:: PAMI.frequentPattern.cuda.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.frequentPattern.cuda.cuApriori module
+------------------------------------------
+
+.. automodule:: PAMI.frequentPattern.cuda.cuApriori
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.frequentPattern.cuda.cuAprioriBit module
+---------------------------------------------
+
+.. automodule:: PAMI.frequentPattern.cuda.cuAprioriBit
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.frequentPattern.cuda.cuEclat module
+----------------------------------------
+
+.. automodule:: PAMI.frequentPattern.cuda.cuEclat
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.frequentPattern.cuda.cuEclatBit module
+-------------------------------------------
+
+.. automodule:: PAMI.frequentPattern.cuda.cuEclatBit
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.frequentPattern.cuda.cudaAprioriGCT module
+-----------------------------------------------
+
+.. automodule:: PAMI.frequentPattern.cuda.cudaAprioriGCT
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.frequentPattern.cuda.cudaAprioriTID module
+-----------------------------------------------
+
+.. automodule:: PAMI.frequentPattern.cuda.cudaAprioriTID
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.frequentPattern.cuda.cudaEclatGCT module
+---------------------------------------------
+
+.. automodule:: PAMI.frequentPattern.cuda.cudaEclatGCT
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.frequentPattern.cuda
+   :members:
+   :undoc-members:
+   :show-inheritance:
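For the closed-pattern miner CHARM documented above, the same driver shape should apply (thresholds are placeholders):

    from PAMI.frequentPattern.closed import CHARM as alg

    obj = alg.CHARM(iFile='transactional_data.csv', minSup=400, sep='\t')
    obj.mine()
    obj.save('closedFrequentPatterns.txt')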
diff --git a/sphinx/_build/html/_sources/PAMI.frequentPattern.maximal.rst.txt b/sphinx/_build/html/_sources/PAMI.frequentPattern.maximal.rst.txt
new file mode 100644
index 000000000..6bb25bae4
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.frequentPattern.maximal.rst.txt
@@ -0,0 +1,29 @@
+PAMI.frequentPattern.maximal package
+====================================
+
+Submodules
+----------
+
+PAMI.frequentPattern.maximal.MaxFPGrowth module
+-----------------------------------------------
+
+.. automodule:: PAMI.frequentPattern.maximal.MaxFPGrowth
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.frequentPattern.maximal.abstract module
+--------------------------------------------
+
+.. automodule:: PAMI.frequentPattern.maximal.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.frequentPattern.maximal
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.frequentPattern.pyspark.rst.txt b/sphinx/_build/html/_sources/PAMI.frequentPattern.pyspark.rst.txt
new file mode 100644
index 000000000..c48040cf7
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.frequentPattern.pyspark.rst.txt
@@ -0,0 +1,45 @@
+PAMI.frequentPattern.pyspark package
+====================================
+
+Submodules
+----------
+
+PAMI.frequentPattern.pyspark.abstract module
+--------------------------------------------
+
+.. automodule:: PAMI.frequentPattern.pyspark.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.frequentPattern.pyspark.parallelApriori module
+---------------------------------------------------
+
+.. automodule:: PAMI.frequentPattern.pyspark.parallelApriori
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.frequentPattern.pyspark.parallelECLAT module
+-------------------------------------------------
+
+.. automodule:: PAMI.frequentPattern.pyspark.parallelECLAT
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.frequentPattern.pyspark.parallelFPGrowth module
+----------------------------------------------------
+
+.. automodule:: PAMI.frequentPattern.pyspark.parallelFPGrowth
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.frequentPattern.pyspark
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.frequentPattern.rst.txt b/sphinx/_build/html/_sources/PAMI.frequentPattern.rst.txt
new file mode 100644
index 000000000..aa0e95683
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.frequentPattern.rst.txt
@@ -0,0 +1,23 @@
+PAMI.frequentPattern package
+============================
+
+Subpackages
+-----------
+
+.. toctree::
+   :maxdepth: 4
+
+   PAMI.frequentPattern.basic
+   PAMI.frequentPattern.closed
+   PAMI.frequentPattern.cuda
+   PAMI.frequentPattern.maximal
+   PAMI.frequentPattern.pyspark
+   PAMI.frequentPattern.topk
+
+Module contents
+---------------
+
+.. automodule:: PAMI.frequentPattern
+   :members:
+   :undoc-members:
+   :show-inheritance:
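The pyspark stubs above document the distributed miners. A sketch for parallelFPGrowth, assuming the numWorkers parameter that PAMI's parallel implementations take:

    from PAMI.frequentPattern.pyspark import parallelFPGrowth as alg

    obj = alg.parallelFPGrowth(iFile='transactional_data.csv', minSup=400,
                               numWorkers=4)  # numWorkers is an assumption
    obj.mine()
    obj.save('frequentPatterns.txt')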
diff --git a/sphinx/_build/html/_sources/PAMI.frequentPattern.topk.rst.txt b/sphinx/_build/html/_sources/PAMI.frequentPattern.topk.rst.txt
new file mode 100644
index 000000000..44e3e5f2d
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.frequentPattern.topk.rst.txt
@@ -0,0 +1,29 @@
+PAMI.frequentPattern.topk package
+=================================
+
+Submodules
+----------
+
+PAMI.frequentPattern.topk.FAE module
+------------------------------------
+
+.. automodule:: PAMI.frequentPattern.topk.FAE
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.frequentPattern.topk.abstract module
+-----------------------------------------
+
+.. automodule:: PAMI.frequentPattern.topk.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.frequentPattern.topk
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.fuzzyCorrelatedPattern.basic.rst.txt b/sphinx/_build/html/_sources/PAMI.fuzzyCorrelatedPattern.basic.rst.txt
new file mode 100644
index 000000000..8eda08669
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.fuzzyCorrelatedPattern.basic.rst.txt
@@ -0,0 +1,29 @@
+PAMI.fuzzyCorrelatedPattern.basic package
+=========================================
+
+Submodules
+----------
+
+PAMI.fuzzyCorrelatedPattern.basic.FCPGrowth module
+--------------------------------------------------
+
+.. automodule:: PAMI.fuzzyCorrelatedPattern.basic.FCPGrowth
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.fuzzyCorrelatedPattern.basic.abstract module
+-------------------------------------------------
+
+.. automodule:: PAMI.fuzzyCorrelatedPattern.basic.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.fuzzyCorrelatedPattern.basic
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.fuzzyCorrelatedPattern.rst.txt b/sphinx/_build/html/_sources/PAMI.fuzzyCorrelatedPattern.rst.txt
new file mode 100644
index 000000000..5971e7421
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.fuzzyCorrelatedPattern.rst.txt
@@ -0,0 +1,18 @@
+PAMI.fuzzyCorrelatedPattern package
+===================================
+
+Subpackages
+-----------
+
+.. toctree::
+   :maxdepth: 4
+
+   PAMI.fuzzyCorrelatedPattern.basic
+
+Module contents
+---------------
+
+.. automodule:: PAMI.fuzzyCorrelatedPattern
+   :members:
+   :undoc-members:
+   :show-inheritance:
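For the fuzzy-correlated miner FCPGrowth above, a sketch assuming it takes a minimum support plus an all-confidence ratio:

    from PAMI.fuzzyCorrelatedPattern.basic import FCPGrowth as alg

    obj = alg.FCPGrowth(iFile='fuzzyTransactions.csv', minSup=2,
                        minAllConf=0.5)  # parameter name is an assumption
    obj.mine()
    obj.save('fuzzyCorrelatedPatterns.txt')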
diff --git a/sphinx/_build/html/_sources/PAMI.fuzzyFrequentPattern.basic.rst.txt b/sphinx/_build/html/_sources/PAMI.fuzzyFrequentPattern.basic.rst.txt
new file mode 100644
index 000000000..bd1fdcb64
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.fuzzyFrequentPattern.basic.rst.txt
@@ -0,0 +1,37 @@
+PAMI.fuzzyFrequentPattern.basic package
+=======================================
+
+Submodules
+----------
+
+PAMI.fuzzyFrequentPattern.basic.FFIMiner module
+-----------------------------------------------
+
+.. automodule:: PAMI.fuzzyFrequentPattern.basic.FFIMiner
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.fuzzyFrequentPattern.basic.FFIMiner\_old module
+----------------------------------------------------
+
+.. automodule:: PAMI.fuzzyFrequentPattern.basic.FFIMiner_old
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.fuzzyFrequentPattern.basic.abstract module
+-----------------------------------------------
+
+.. automodule:: PAMI.fuzzyFrequentPattern.basic.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.fuzzyFrequentPattern.basic
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.fuzzyFrequentPattern.rst.txt b/sphinx/_build/html/_sources/PAMI.fuzzyFrequentPattern.rst.txt
new file mode 100644
index 000000000..1e31cc03e
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.fuzzyFrequentPattern.rst.txt
@@ -0,0 +1,18 @@
+PAMI.fuzzyFrequentPattern package
+=================================
+
+Subpackages
+-----------
+
+.. toctree::
+   :maxdepth: 4
+
+   PAMI.fuzzyFrequentPattern.basic
+
+Module contents
+---------------
+
+.. automodule:: PAMI.fuzzyFrequentPattern
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.fuzzyGeoreferencedFrequentPattern.basic.rst.txt b/sphinx/_build/html/_sources/PAMI.fuzzyGeoreferencedFrequentPattern.basic.rst.txt
new file mode 100644
index 000000000..6c7720ca2
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.fuzzyGeoreferencedFrequentPattern.basic.rst.txt
@@ -0,0 +1,37 @@
+PAMI.fuzzyGeoreferencedFrequentPattern.basic package
+====================================================
+
+Submodules
+----------
+
+PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner module
+-------------------------------------------------------------
+
+.. automodule:: PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner\_old module
+------------------------------------------------------------------
+
+.. automodule:: PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner_old
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.fuzzyGeoreferencedFrequentPattern.basic.abstract module
+------------------------------------------------------------
+
+.. automodule:: PAMI.fuzzyGeoreferencedFrequentPattern.basic.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.fuzzyGeoreferencedFrequentPattern.basic
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.fuzzyGeoreferencedFrequentPattern.rst.txt b/sphinx/_build/html/_sources/PAMI.fuzzyGeoreferencedFrequentPattern.rst.txt
new file mode 100644
index 000000000..141a31768
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.fuzzyGeoreferencedFrequentPattern.rst.txt
@@ -0,0 +1,18 @@
+PAMI.fuzzyGeoreferencedFrequentPattern package
+==============================================
+
+Subpackages
+-----------
+
+.. toctree::
+   :maxdepth: 4
+
+   PAMI.fuzzyGeoreferencedFrequentPattern.basic
+
+Module contents
+---------------
+
+.. automodule:: PAMI.fuzzyGeoreferencedFrequentPattern
+   :members:
+   :undoc-members:
+   :show-inheritance:
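A corresponding sketch for the fuzzy-frequent miner FFIMiner documented above (threshold is a placeholder):

    from PAMI.fuzzyFrequentPattern.basic import FFIMiner as alg

    obj = alg.FFIMiner(iFile='fuzzyTransactions.csv', minSup=2)
    obj.mine()
    obj.save('fuzzyFrequentPatterns.txt')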
diff --git a/sphinx/_build/html/_sources/PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.rst.txt b/sphinx/_build/html/_sources/PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.rst.txt
new file mode 100644
index 000000000..c9e48b61a
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.rst.txt
@@ -0,0 +1,37 @@
+PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic package
+============================================================
+
+Submodules
+----------
+
+PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner module
+----------------------------------------------------------------------
+
+.. automodule:: PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner\_old module
+---------------------------------------------------------------------------
+
+.. automodule:: PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner_old
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.abstract module
+--------------------------------------------------------------------
+
+.. automodule:: PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.rst.txt b/sphinx/_build/html/_sources/PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.rst.txt
new file mode 100644
index 000000000..5a99c7f77
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.rst.txt
@@ -0,0 +1,18 @@
+PAMI.fuzzyGeoreferencedPeriodicFrequentPattern package
+======================================================
+
+Subpackages
+-----------
+
+.. toctree::
+   :maxdepth: 4
+
+   PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic
+
+Module contents
+---------------
+
+.. automodule:: PAMI.fuzzyGeoreferencedPeriodicFrequentPattern
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.fuzzyPartialPeriodicPatterns.basic.rst.txt b/sphinx/_build/html/_sources/PAMI.fuzzyPartialPeriodicPatterns.basic.rst.txt
new file mode 100644
index 000000000..ba0a4f630
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.fuzzyPartialPeriodicPatterns.basic.rst.txt
@@ -0,0 +1,29 @@
+PAMI.fuzzyPartialPeriodicPatterns.basic package
+===============================================
+
+Submodules
+----------
+
+PAMI.fuzzyPartialPeriodicPatterns.basic.F3PMiner module
+-------------------------------------------------------
+
+.. automodule:: PAMI.fuzzyPartialPeriodicPatterns.basic.F3PMiner
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.fuzzyPartialPeriodicPatterns.basic.abstract module
+-------------------------------------------------------
+
+.. automodule:: PAMI.fuzzyPartialPeriodicPatterns.basic.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.fuzzyPartialPeriodicPatterns.basic
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.fuzzyPartialPeriodicPatterns.rst.txt b/sphinx/_build/html/_sources/PAMI.fuzzyPartialPeriodicPatterns.rst.txt
new file mode 100644
index 000000000..d909bbf60
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.fuzzyPartialPeriodicPatterns.rst.txt
@@ -0,0 +1,18 @@
+PAMI.fuzzyPartialPeriodicPatterns package
+=========================================
+
+Subpackages
+-----------
+
+.. toctree::
+   :maxdepth: 4
+
+   PAMI.fuzzyPartialPeriodicPatterns.basic
+
+Module contents
+---------------
+
+.. automodule:: PAMI.fuzzyPartialPeriodicPatterns
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.fuzzyPeriodicFrequentPattern.basic.rst.txt b/sphinx/_build/html/_sources/PAMI.fuzzyPeriodicFrequentPattern.basic.rst.txt
new file mode 100644
index 000000000..720f3a269
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.fuzzyPeriodicFrequentPattern.basic.rst.txt
@@ -0,0 +1,37 @@
+PAMI.fuzzyPeriodicFrequentPattern.basic package
+===============================================
+
+Submodules
+----------
+
+PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner module
+--------------------------------------------------------
+
+.. automodule:: PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner\_old module
+-------------------------------------------------------------
+
+.. automodule:: PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner_old
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.fuzzyPeriodicFrequentPattern.basic.abstract module
+-------------------------------------------------------
+
+.. automodule:: PAMI.fuzzyPeriodicFrequentPattern.basic.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.fuzzyPeriodicFrequentPattern.basic
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.fuzzyPeriodicFrequentPattern.rst.txt b/sphinx/_build/html/_sources/PAMI.fuzzyPeriodicFrequentPattern.rst.txt
new file mode 100644
index 000000000..2e8a09e92
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.fuzzyPeriodicFrequentPattern.rst.txt
@@ -0,0 +1,18 @@
+PAMI.fuzzyPeriodicFrequentPattern package
+=========================================
+
+Subpackages
+-----------
+
+.. toctree::
+   :maxdepth: 4
+
+   PAMI.fuzzyPeriodicFrequentPattern.basic
+
+Module contents
+---------------
+
+.. automodule:: PAMI.fuzzyPeriodicFrequentPattern
+   :members:
+   :undoc-members:
+   :show-inheritance:
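For FPFPMiner above, which mines fuzzy periodic-frequent patterns, a sketch assuming the usual periodic pair of thresholds (support plus maximum periodicity):

    from PAMI.fuzzyPeriodicFrequentPattern.basic import FPFPMiner as alg

    obj = alg.FPFPMiner(iFile='fuzzyTemporal.csv', minSup=2, maxPer=3)
    obj.mine()
    obj.save('fuzzyPeriodicFrequentPatterns.txt')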
diff --git a/sphinx/_build/html/_sources/PAMI.geoReferencedPeriodicFrequentPattern.basic.rst.txt b/sphinx/_build/html/_sources/PAMI.geoReferencedPeriodicFrequentPattern.basic.rst.txt
new file mode 100644
index 000000000..1137c106e
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.geoReferencedPeriodicFrequentPattern.basic.rst.txt
@@ -0,0 +1,29 @@
+PAMI.geoReferencedPeriodicFrequentPattern.basic package
+=======================================================
+
+Submodules
+----------
+
+PAMI.geoReferencedPeriodicFrequentPattern.basic.GPFPMiner module
+----------------------------------------------------------------
+
+.. automodule:: PAMI.geoReferencedPeriodicFrequentPattern.basic.GPFPMiner
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.geoReferencedPeriodicFrequentPattern.basic.abstract module
+---------------------------------------------------------------
+
+.. automodule:: PAMI.geoReferencedPeriodicFrequentPattern.basic.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.geoReferencedPeriodicFrequentPattern.basic
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.geoReferencedPeriodicFrequentPattern.rst.txt b/sphinx/_build/html/_sources/PAMI.geoReferencedPeriodicFrequentPattern.rst.txt
new file mode 100644
index 000000000..724ce8780
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.geoReferencedPeriodicFrequentPattern.rst.txt
@@ -0,0 +1,18 @@
+PAMI.geoReferencedPeriodicFrequentPattern package
+=================================================
+
+Subpackages
+-----------
+
+.. toctree::
+   :maxdepth: 4
+
+   PAMI.geoReferencedPeriodicFrequentPattern.basic
+
+Module contents
+---------------
+
+.. automodule:: PAMI.geoReferencedPeriodicFrequentPattern
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.georeferencedFrequentPattern.basic.rst.txt b/sphinx/_build/html/_sources/PAMI.georeferencedFrequentPattern.basic.rst.txt
new file mode 100644
index 000000000..0628536e3
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.georeferencedFrequentPattern.basic.rst.txt
@@ -0,0 +1,37 @@
+PAMI.georeferencedFrequentPattern.basic package
+===============================================
+
+Submodules
+----------
+
+PAMI.georeferencedFrequentPattern.basic.FSPGrowth module
+--------------------------------------------------------
+
+.. automodule:: PAMI.georeferencedFrequentPattern.basic.FSPGrowth
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.georeferencedFrequentPattern.basic.SpatialECLAT module
+-----------------------------------------------------------
+
+.. automodule:: PAMI.georeferencedFrequentPattern.basic.SpatialECLAT
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.georeferencedFrequentPattern.basic.abstract module
+-------------------------------------------------------
+
+.. automodule:: PAMI.georeferencedFrequentPattern.basic.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.georeferencedFrequentPattern.basic
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.georeferencedFrequentPattern.rst.txt b/sphinx/_build/html/_sources/PAMI.georeferencedFrequentPattern.rst.txt
new file mode 100644
index 000000000..ea1b54c08
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.georeferencedFrequentPattern.rst.txt
@@ -0,0 +1,18 @@
+PAMI.georeferencedFrequentPattern package
+=========================================
+
+Subpackages
+-----------
+
+.. toctree::
+   :maxdepth: 4
+
+   PAMI.georeferencedFrequentPattern.basic
+
+Module contents
+---------------
+
+.. automodule:: PAMI.georeferencedFrequentPattern
+   :members:
+   :undoc-members:
+   :show-inheritance:
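The georeferenced miners above additionally need a neighbourhood file. A sketch for FSPGrowth, assuming the nFile parameter used by PAMI's spatial algorithms:

    from PAMI.georeferencedFrequentPattern.basic import FSPGrowth as alg

    obj = alg.FSPGrowth(iFile='transactions.csv', nFile='neighbours.csv',
                        minSup=400)  # nFile: item -> neighbouring items
    obj.mine()
    obj.save('georeferencedFrequentPatterns.txt')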
diff --git a/sphinx/_build/html/_sources/PAMI.georeferencedFrequentSequencePattern.rst.txt b/sphinx/_build/html/_sources/PAMI.georeferencedFrequentSequencePattern.rst.txt
new file mode 100644
index 000000000..004892eaa
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.georeferencedFrequentSequencePattern.rst.txt
@@ -0,0 +1,21 @@
+PAMI.georeferencedFrequentSequencePattern package
+=================================================
+
+Submodules
+----------
+
+PAMI.georeferencedFrequentSequencePattern.abstract module
+---------------------------------------------------------
+
+.. automodule:: PAMI.georeferencedFrequentSequencePattern.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.georeferencedFrequentSequencePattern
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.georeferencedPartialPeriodicPattern.basic.rst.txt b/sphinx/_build/html/_sources/PAMI.georeferencedPartialPeriodicPattern.basic.rst.txt
new file mode 100644
index 000000000..d1700d01f
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.georeferencedPartialPeriodicPattern.basic.rst.txt
@@ -0,0 +1,29 @@
+PAMI.georeferencedPartialPeriodicPattern.basic package
+======================================================
+
+Submodules
+----------
+
+PAMI.georeferencedPartialPeriodicPattern.basic.STEclat module
+-------------------------------------------------------------
+
+.. automodule:: PAMI.georeferencedPartialPeriodicPattern.basic.STEclat
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.georeferencedPartialPeriodicPattern.basic.abstract module
+--------------------------------------------------------------
+
+.. automodule:: PAMI.georeferencedPartialPeriodicPattern.basic.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.georeferencedPartialPeriodicPattern.basic
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.georeferencedPartialPeriodicPattern.rst.txt b/sphinx/_build/html/_sources/PAMI.georeferencedPartialPeriodicPattern.rst.txt
new file mode 100644
index 000000000..10ddb38d7
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.georeferencedPartialPeriodicPattern.rst.txt
@@ -0,0 +1,18 @@
+PAMI.georeferencedPartialPeriodicPattern package
+================================================
+
+Subpackages
+-----------
+
+.. toctree::
+   :maxdepth: 4
+
+   PAMI.georeferencedPartialPeriodicPattern.basic
+
+Module contents
+---------------
+
+.. automodule:: PAMI.georeferencedPartialPeriodicPattern
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.highUtilityFrequentPattern.basic.rst.txt b/sphinx/_build/html/_sources/PAMI.highUtilityFrequentPattern.basic.rst.txt
new file mode 100644
index 000000000..ce9b6dc09
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.highUtilityFrequentPattern.basic.rst.txt
@@ -0,0 +1,29 @@
+PAMI.highUtilityFrequentPattern.basic package
+=============================================
+
+Submodules
+----------
+
+PAMI.highUtilityFrequentPattern.basic.HUFIM module
+--------------------------------------------------
+
+.. automodule:: PAMI.highUtilityFrequentPattern.basic.HUFIM
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.highUtilityFrequentPattern.basic.abstract module
+-----------------------------------------------------
+
+.. automodule:: PAMI.highUtilityFrequentPattern.basic.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.highUtilityFrequentPattern.basic
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.highUtilityFrequentPattern.rst.txt b/sphinx/_build/html/_sources/PAMI.highUtilityFrequentPattern.rst.txt
new file mode 100644
index 000000000..7ec32c3ec
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.highUtilityFrequentPattern.rst.txt
@@ -0,0 +1,18 @@
+PAMI.highUtilityFrequentPattern package
+=======================================
+
+Subpackages
+-----------
+
+.. toctree::
+   :maxdepth: 4
+
+   PAMI.highUtilityFrequentPattern.basic
+
+Module contents
+---------------
+
+.. automodule:: PAMI.highUtilityFrequentPattern
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.highUtilityGeoreferencedFrequentPattern.basic.rst.txt b/sphinx/_build/html/_sources/PAMI.highUtilityGeoreferencedFrequentPattern.basic.rst.txt
new file mode 100644
index 000000000..c204cc7f8
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.highUtilityGeoreferencedFrequentPattern.basic.rst.txt
@@ -0,0 +1,29 @@
+PAMI.highUtilityGeoreferencedFrequentPattern.basic package
+==========================================================
+
+Submodules
+----------
+
+PAMI.highUtilityGeoreferencedFrequentPattern.basic.SHUFIM module
+----------------------------------------------------------------
+
+.. automodule:: PAMI.highUtilityGeoreferencedFrequentPattern.basic.SHUFIM
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.highUtilityGeoreferencedFrequentPattern.basic.abstract module
+------------------------------------------------------------------
+
+.. automodule:: PAMI.highUtilityGeoreferencedFrequentPattern.basic.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.highUtilityGeoreferencedFrequentPattern.basic
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.highUtilityGeoreferencedFrequentPattern.rst.txt b/sphinx/_build/html/_sources/PAMI.highUtilityGeoreferencedFrequentPattern.rst.txt
new file mode 100644
index 000000000..c103d95c9
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.highUtilityGeoreferencedFrequentPattern.rst.txt
@@ -0,0 +1,18 @@
+PAMI.highUtilityGeoreferencedFrequentPattern package
+====================================================
+
+Subpackages
+-----------
+
+.. toctree::
+   :maxdepth: 4
+
+   PAMI.highUtilityGeoreferencedFrequentPattern.basic
+
+Module contents
+---------------
+
+.. automodule:: PAMI.highUtilityGeoreferencedFrequentPattern
+   :members:
+   :undoc-members:
+   :show-inheritance:
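HUFIM above combines the utility and frequency criteria, so a sketch would pass both thresholds (values are placeholders):

    from PAMI.highUtilityFrequentPattern.basic import HUFIM as alg

    obj = alg.HUFIM(iFile='utilityTransactions.csv', minUtil=20000, minSup=400)
    obj.mine()
    obj.save('highUtilityFrequentPatterns.txt')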
diff --git a/sphinx/_build/html/_sources/PAMI.highUtilityPattern.basic.rst.txt b/sphinx/_build/html/_sources/PAMI.highUtilityPattern.basic.rst.txt
new file mode 100644
index 000000000..2cfc2b50c
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.highUtilityPattern.basic.rst.txt
@@ -0,0 +1,53 @@
+PAMI.highUtilityPattern.basic package
+=====================================
+
+Submodules
+----------
+
+PAMI.highUtilityPattern.basic.EFIM module
+-----------------------------------------
+
+.. automodule:: PAMI.highUtilityPattern.basic.EFIM
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.highUtilityPattern.basic.HMiner module
+-------------------------------------------
+
+.. automodule:: PAMI.highUtilityPattern.basic.HMiner
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.highUtilityPattern.basic.UPGrowth module
+---------------------------------------------
+
+.. automodule:: PAMI.highUtilityPattern.basic.UPGrowth
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.highUtilityPattern.basic.abstract module
+---------------------------------------------
+
+.. automodule:: PAMI.highUtilityPattern.basic.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.highUtilityPattern.basic.efimParallel module
+-------------------------------------------------
+
+.. automodule:: PAMI.highUtilityPattern.basic.efimParallel
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.highUtilityPattern.basic
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.highUtilityPattern.parallel.rst.txt b/sphinx/_build/html/_sources/PAMI.highUtilityPattern.parallel.rst.txt
new file mode 100644
index 000000000..94c61d602
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.highUtilityPattern.parallel.rst.txt
@@ -0,0 +1,29 @@
+PAMI.highUtilityPattern.parallel package
+========================================
+
+Submodules
+----------
+
+PAMI.highUtilityPattern.parallel.abstract module
+------------------------------------------------
+
+.. automodule:: PAMI.highUtilityPattern.parallel.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.highUtilityPattern.parallel.efimparallel module
+----------------------------------------------------
+
+.. automodule:: PAMI.highUtilityPattern.parallel.efimparallel
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.highUtilityPattern.parallel
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.highUtilityPattern.rst.txt b/sphinx/_build/html/_sources/PAMI.highUtilityPattern.rst.txt
new file mode 100644
index 000000000..0a742a8b2
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.highUtilityPattern.rst.txt
@@ -0,0 +1,19 @@
+PAMI.highUtilityPattern package
+===============================
+
+Subpackages
+-----------
+
+.. toctree::
+   :maxdepth: 4
+
+   PAMI.highUtilityPattern.basic
+   PAMI.highUtilityPattern.parallel
+
+Module contents
+---------------
+
+.. automodule:: PAMI.highUtilityPattern
+   :members:
+   :undoc-members:
+   :show-inheritance:
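For the plain high-utility miners above (EFIM, HMiner, UPGrowth), only a utility threshold is needed; a minimal EFIM driver (values are placeholders):

    from PAMI.highUtilityPattern.basic import EFIM as alg

    obj = alg.EFIM(iFile='utilityTransactions.csv', minUtil=20000)
    obj.mine()
    obj.save('highUtilityPatterns.txt')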
diff --git a/sphinx/_build/html/_sources/PAMI.highUtilityPatternsInStreams.rst.txt b/sphinx/_build/html/_sources/PAMI.highUtilityPatternsInStreams.rst.txt
new file mode 100644
index 000000000..7d13f8055
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.highUtilityPatternsInStreams.rst.txt
@@ -0,0 +1,37 @@
+PAMI.highUtilityPatternsInStreams package
+=========================================
+
+Submodules
+----------
+
+PAMI.highUtilityPatternsInStreams.HUPMS module
+----------------------------------------------
+
+.. automodule:: PAMI.highUtilityPatternsInStreams.HUPMS
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.highUtilityPatternsInStreams.SHUGrowth module
+--------------------------------------------------
+
+.. automodule:: PAMI.highUtilityPatternsInStreams.SHUGrowth
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.highUtilityPatternsInStreams.abstract module
+-------------------------------------------------
+
+.. automodule:: PAMI.highUtilityPatternsInStreams.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.highUtilityPatternsInStreams
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.highUtilitySpatialPattern.basic.rst.txt b/sphinx/_build/html/_sources/PAMI.highUtilitySpatialPattern.basic.rst.txt
new file mode 100644
index 000000000..28e94f255
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.highUtilitySpatialPattern.basic.rst.txt
@@ -0,0 +1,37 @@
+PAMI.highUtilitySpatialPattern.basic package
+============================================
+
+Submodules
+----------
+
+PAMI.highUtilitySpatialPattern.basic.HDSHUIM module
+---------------------------------------------------
+
+.. automodule:: PAMI.highUtilitySpatialPattern.basic.HDSHUIM
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.highUtilitySpatialPattern.basic.SHUIM module
+-------------------------------------------------
+
+.. automodule:: PAMI.highUtilitySpatialPattern.basic.SHUIM
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.highUtilitySpatialPattern.basic.abstract module
+----------------------------------------------------
+
+.. automodule:: PAMI.highUtilitySpatialPattern.basic.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.highUtilitySpatialPattern.basic
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.highUtilitySpatialPattern.rst.txt b/sphinx/_build/html/_sources/PAMI.highUtilitySpatialPattern.rst.txt
new file mode 100644
index 000000000..b3454bec3
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.highUtilitySpatialPattern.rst.txt
@@ -0,0 +1,30 @@
+PAMI.highUtilitySpatialPattern package
+======================================
+
+Subpackages
+-----------
+
+.. toctree::
+   :maxdepth: 4
+
+   PAMI.highUtilitySpatialPattern.basic
+   PAMI.highUtilitySpatialPattern.topk
+
+Submodules
+----------
+
+PAMI.highUtilitySpatialPattern.abstract module
+----------------------------------------------
+
+.. automodule:: PAMI.highUtilitySpatialPattern.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.highUtilitySpatialPattern
+   :members:
+   :undoc-members:
+   :show-inheritance:
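The spatial high-utility miners above (HDSHUIM, SHUIM) combine a utility threshold with a neighbourhood file; a sketch for SHUIM, with the nFile parameter assumed as in the other spatial miners:

    from PAMI.highUtilitySpatialPattern.basic import SHUIM as alg

    obj = alg.SHUIM(iFile='utilityTransactions.csv', nFile='neighbours.csv',
                    minUtil=20000)
    obj.mine()
    obj.save('spatialHighUtilityPatterns.txt')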
diff --git a/sphinx/_build/html/_sources/PAMI.highUtilitySpatialPattern.topk.rst.txt b/sphinx/_build/html/_sources/PAMI.highUtilitySpatialPattern.topk.rst.txt
new file mode 100644
index 000000000..be651127b
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.highUtilitySpatialPattern.topk.rst.txt
@@ -0,0 +1,29 @@
+PAMI.highUtilitySpatialPattern.topk package
+===========================================
+
+Submodules
+----------
+
+PAMI.highUtilitySpatialPattern.topk.TKSHUIM module
+--------------------------------------------------
+
+.. automodule:: PAMI.highUtilitySpatialPattern.topk.TKSHUIM
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.highUtilitySpatialPattern.topk.abstract module
+---------------------------------------------------
+
+.. automodule:: PAMI.highUtilitySpatialPattern.topk.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.highUtilitySpatialPattern.topk
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.localPeriodicPattern.basic.rst.txt b/sphinx/_build/html/_sources/PAMI.localPeriodicPattern.basic.rst.txt
new file mode 100644
index 000000000..d865ce58a
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.localPeriodicPattern.basic.rst.txt
@@ -0,0 +1,45 @@
+PAMI.localPeriodicPattern.basic package
+=======================================
+
+Submodules
+----------
+
+PAMI.localPeriodicPattern.basic.LPPGrowth module
+------------------------------------------------
+
+.. automodule:: PAMI.localPeriodicPattern.basic.LPPGrowth
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.localPeriodicPattern.basic.LPPMBreadth module
+--------------------------------------------------
+
+.. automodule:: PAMI.localPeriodicPattern.basic.LPPMBreadth
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.localPeriodicPattern.basic.LPPMDepth module
+------------------------------------------------
+
+.. automodule:: PAMI.localPeriodicPattern.basic.LPPMDepth
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.localPeriodicPattern.basic.abstract module
+-----------------------------------------------
+
+.. automodule:: PAMI.localPeriodicPattern.basic.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.localPeriodicPattern.basic
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.localPeriodicPattern.rst.txt b/sphinx/_build/html/_sources/PAMI.localPeriodicPattern.rst.txt
new file mode 100644
index 000000000..71f810f1b
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.localPeriodicPattern.rst.txt
@@ -0,0 +1,18 @@
+PAMI.localPeriodicPattern package
+=================================
+
+Subpackages
+-----------
+
+.. toctree::
+   :maxdepth: 4
+
+   PAMI.localPeriodicPattern.basic
+
+Module contents
+---------------
+
+.. automodule:: PAMI.localPeriodicPattern
+   :members:
+   :undoc-members:
+   :show-inheritance:
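Local periodic mining (LPPGrowth and its siblings above) is usually parameterised by a maximum periodicity, a maximum spillover period, and a minimum duration; a sketch under that assumption:

    from PAMI.localPeriodicPattern.basic import LPPGrowth as alg

    obj = alg.LPPGrowth(iFile='temporal.csv', maxPer=3, maxSoPer=10, minDur=5)
    obj.mine()
    obj.save('localPeriodicPatterns.txt')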
diff --git a/sphinx/_build/html/_sources/PAMI.multipleMinimumSupportBasedFrequentPattern.basic.rst.txt b/sphinx/_build/html/_sources/PAMI.multipleMinimumSupportBasedFrequentPattern.basic.rst.txt
new file mode 100644
index 000000000..4a0b92f2c
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.multipleMinimumSupportBasedFrequentPattern.basic.rst.txt
@@ -0,0 +1,37 @@
+PAMI.multipleMinimumSupportBasedFrequentPattern.basic package
+=============================================================
+
+Submodules
+----------
+
+PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowth module
+----------------------------------------------------------------------
+
+.. automodule:: PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowth
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowthPlus module
+--------------------------------------------------------------------------
+
+.. automodule:: PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowthPlus
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.multipleMinimumSupportBasedFrequentPattern.basic.abstract module
+---------------------------------------------------------------------
+
+.. automodule:: PAMI.multipleMinimumSupportBasedFrequentPattern.basic.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.multipleMinimumSupportBasedFrequentPattern.basic
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.multipleMinimumSupportBasedFrequentPattern.rst.txt b/sphinx/_build/html/_sources/PAMI.multipleMinimumSupportBasedFrequentPattern.rst.txt
new file mode 100644
index 000000000..e75617d41
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.multipleMinimumSupportBasedFrequentPattern.rst.txt
@@ -0,0 +1,18 @@
+PAMI.multipleMinimumSupportBasedFrequentPattern package
+=======================================================
+
+Subpackages
+-----------
+
+.. toctree::
+   :maxdepth: 4
+
+   PAMI.multipleMinimumSupportBasedFrequentPattern.basic
+
+Module contents
+---------------
+
+.. automodule:: PAMI.multipleMinimumSupportBasedFrequentPattern
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.partialPeriodicFrequentPattern.basic.rst.txt b/sphinx/_build/html/_sources/PAMI.partialPeriodicFrequentPattern.basic.rst.txt
new file mode 100644
index 000000000..5c63ede2d
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.partialPeriodicFrequentPattern.basic.rst.txt
@@ -0,0 +1,37 @@
+PAMI.partialPeriodicFrequentPattern.basic package
+=================================================
+
+Submodules
+----------
+
+PAMI.partialPeriodicFrequentPattern.basic.GPFgrowth module
+----------------------------------------------------------
+
+.. automodule:: PAMI.partialPeriodicFrequentPattern.basic.GPFgrowth
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.partialPeriodicFrequentPattern.basic.PPF\_DFS module
+---------------------------------------------------------
+
+.. automodule:: PAMI.partialPeriodicFrequentPattern.basic.PPF_DFS
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.partialPeriodicFrequentPattern.basic.abstract module
+---------------------------------------------------------
+
+.. automodule:: PAMI.partialPeriodicFrequentPattern.basic.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.partialPeriodicFrequentPattern.basic
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.partialPeriodicFrequentPattern.rst.txt b/sphinx/_build/html/_sources/PAMI.partialPeriodicFrequentPattern.rst.txt
new file mode 100644
index 000000000..0b2dd8938
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.partialPeriodicFrequentPattern.rst.txt
@@ -0,0 +1,18 @@
+PAMI.partialPeriodicFrequentPattern package
+===========================================
+
+Subpackages
+-----------
+
+.. toctree::
+   :maxdepth: 4
+
+   PAMI.partialPeriodicFrequentPattern.basic
+
+Module contents
+---------------
+
+.. automodule:: PAMI.partialPeriodicFrequentPattern
+   :members:
+   :undoc-members:
+   :show-inheritance:
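CFPGrowth above replaces the single minSup with per-item minimum supports, typically supplied as a separate MIS file; a sketch under that assumption:

    from PAMI.multipleMinimumSupportBasedFrequentPattern.basic import CFPGrowth as alg

    obj = alg.CFPGrowth(iFile='transactions.csv', MIS='misValues.csv')
    obj.mine()
    obj.save('frequentPatternsWithMIS.txt')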
diff --git a/sphinx/_build/html/_sources/PAMI.partialPeriodicPattern.basic.rst.txt b/sphinx/_build/html/_sources/PAMI.partialPeriodicPattern.basic.rst.txt
new file mode 100644
index 000000000..aed795097
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.partialPeriodicPattern.basic.rst.txt
@@ -0,0 +1,53 @@
+PAMI.partialPeriodicPattern.basic package
+=========================================
+
+Submodules
+----------
+
+PAMI.partialPeriodicPattern.basic.GThreePGrowth module
+------------------------------------------------------
+
+.. automodule:: PAMI.partialPeriodicPattern.basic.GThreePGrowth
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.partialPeriodicPattern.basic.Gabstract module
+--------------------------------------------------
+
+.. automodule:: PAMI.partialPeriodicPattern.basic.Gabstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.partialPeriodicPattern.basic.PPPGrowth module
+--------------------------------------------------
+
+.. automodule:: PAMI.partialPeriodicPattern.basic.PPPGrowth
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.partialPeriodicPattern.basic.PPP\_ECLAT module
+---------------------------------------------------
+
+.. automodule:: PAMI.partialPeriodicPattern.basic.PPP_ECLAT
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.partialPeriodicPattern.basic.abstract module
+-------------------------------------------------
+
+.. automodule:: PAMI.partialPeriodicPattern.basic.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.partialPeriodicPattern.basic
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.partialPeriodicPattern.closed.rst.txt b/sphinx/_build/html/_sources/PAMI.partialPeriodicPattern.closed.rst.txt
new file mode 100644
index 000000000..7cd362086
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.partialPeriodicPattern.closed.rst.txt
@@ -0,0 +1,29 @@
+PAMI.partialPeriodicPattern.closed package
+==========================================
+
+Submodules
+----------
+
+PAMI.partialPeriodicPattern.closed.PPPClose module
+--------------------------------------------------
+
+.. automodule:: PAMI.partialPeriodicPattern.closed.PPPClose
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.partialPeriodicPattern.closed.abstract module
+--------------------------------------------------
+
+.. automodule:: PAMI.partialPeriodicPattern.closed.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.partialPeriodicPattern.closed
+   :members:
+   :undoc-members:
+   :show-inheritance:
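For the partial periodic miners above (PPPGrowth, PPP_ECLAT), the thresholds are a minimum periodic support and a period; a sketch:

    from PAMI.partialPeriodicPattern.basic import PPPGrowth as alg

    obj = alg.PPPGrowth(iFile='temporal.csv', minPS=400, period=10)
    obj.mine()
    obj.save('partialPeriodicPatterns.txt')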
diff --git a/sphinx/_build/html/_sources/PAMI.partialPeriodicPattern.maximal.rst.txt b/sphinx/_build/html/_sources/PAMI.partialPeriodicPattern.maximal.rst.txt
new file mode 100644
index 000000000..fc05d39dc
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.partialPeriodicPattern.maximal.rst.txt
@@ -0,0 +1,29 @@
+PAMI.partialPeriodicPattern.maximal package
+===========================================
+
+Submodules
+----------
+
+PAMI.partialPeriodicPattern.maximal.Max3PGrowth module
+------------------------------------------------------
+
+.. automodule:: PAMI.partialPeriodicPattern.maximal.Max3PGrowth
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.partialPeriodicPattern.maximal.abstract module
+---------------------------------------------------
+
+.. automodule:: PAMI.partialPeriodicPattern.maximal.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.partialPeriodicPattern.maximal
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.partialPeriodicPattern.pyspark.rst.txt b/sphinx/_build/html/_sources/PAMI.partialPeriodicPattern.pyspark.rst.txt
new file mode 100644
index 000000000..c3f446b4b
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.partialPeriodicPattern.pyspark.rst.txt
@@ -0,0 +1,29 @@
+PAMI.partialPeriodicPattern.pyspark package
+===========================================
+
+Submodules
+----------
+
+PAMI.partialPeriodicPattern.pyspark.abstract module
+---------------------------------------------------
+
+.. automodule:: PAMI.partialPeriodicPattern.pyspark.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.partialPeriodicPattern.pyspark.parallel3PGrowth module
+-----------------------------------------------------------
+
+.. automodule:: PAMI.partialPeriodicPattern.pyspark.parallel3PGrowth
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.partialPeriodicPattern.pyspark
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.partialPeriodicPattern.rst.txt b/sphinx/_build/html/_sources/PAMI.partialPeriodicPattern.rst.txt
new file mode 100644
index 000000000..a9393728c
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.partialPeriodicPattern.rst.txt
@@ -0,0 +1,22 @@
+PAMI.partialPeriodicPattern package
+===================================
+
+Subpackages
+-----------
+
+.. toctree::
+   :maxdepth: 4
+
+   PAMI.partialPeriodicPattern.basic
+   PAMI.partialPeriodicPattern.closed
+   PAMI.partialPeriodicPattern.maximal
+   PAMI.partialPeriodicPattern.pyspark
+   PAMI.partialPeriodicPattern.topk
+
+Module contents
+---------------
+
+.. automodule:: PAMI.partialPeriodicPattern
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.partialPeriodicPattern.topk.rst.txt b/sphinx/_build/html/_sources/PAMI.partialPeriodicPattern.topk.rst.txt
new file mode 100644
index 000000000..396d98f55
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.partialPeriodicPattern.topk.rst.txt
@@ -0,0 +1,29 @@
+PAMI.partialPeriodicPattern.topk package
+========================================
+
+Submodules
+----------
+
+PAMI.partialPeriodicPattern.topk.abstract module
+------------------------------------------------
+
+.. automodule:: PAMI.partialPeriodicPattern.topk.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.partialPeriodicPattern.topk.k3PMiner module
+------------------------------------------------
+
+.. automodule:: PAMI.partialPeriodicPattern.topk.k3PMiner
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.partialPeriodicPattern.topk
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.partialPeriodicPatternInMultipleTimeSeries.rst.txt b/sphinx/_build/html/_sources/PAMI.partialPeriodicPatternInMultipleTimeSeries.rst.txt
new file mode 100644
index 000000000..3d0d4a413
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.partialPeriodicPatternInMultipleTimeSeries.rst.txt
@@ -0,0 +1,29 @@
+PAMI.partialPeriodicPatternInMultipleTimeSeries package
+=======================================================
+
+Submodules
+----------
+
+PAMI.partialPeriodicPatternInMultipleTimeSeries.PPGrowth module
+---------------------------------------------------------------
+
+.. automodule:: PAMI.partialPeriodicPatternInMultipleTimeSeries.PPGrowth
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.partialPeriodicPatternInMultipleTimeSeries.abstract module
+---------------------------------------------------------------
+
+.. automodule:: PAMI.partialPeriodicPatternInMultipleTimeSeries.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.partialPeriodicPatternInMultipleTimeSeries
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.periodicCorrelatedPattern.basic.rst.txt b/sphinx/_build/html/_sources/PAMI.periodicCorrelatedPattern.basic.rst.txt
new file mode 100644
index 000000000..55a3e53b7
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.periodicCorrelatedPattern.basic.rst.txt
@@ -0,0 +1,29 @@
+PAMI.periodicCorrelatedPattern.basic package
+============================================
+
+Submodules
+----------
+
+PAMI.periodicCorrelatedPattern.basic.EPCPGrowth module
+------------------------------------------------------
+
+.. automodule:: PAMI.periodicCorrelatedPattern.basic.EPCPGrowth
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.periodicCorrelatedPattern.basic.abstract module
+----------------------------------------------------
+
+.. automodule:: PAMI.periodicCorrelatedPattern.basic.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.periodicCorrelatedPattern.basic
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.periodicCorrelatedPattern.rst.txt b/sphinx/_build/html/_sources/PAMI.periodicCorrelatedPattern.rst.txt
new file mode 100644
index 000000000..c34c66882
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.periodicCorrelatedPattern.rst.txt
@@ -0,0 +1,18 @@
+PAMI.periodicCorrelatedPattern package
+======================================
+
+Subpackages
+-----------
+
+.. toctree::
+   :maxdepth: 4
+
+   PAMI.periodicCorrelatedPattern.basic
+
+Module contents
+---------------
+
+.. automodule:: PAMI.periodicCorrelatedPattern
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.periodicFrequentPattern.basic.rst.txt b/sphinx/_build/html/_sources/PAMI.periodicFrequentPattern.basic.rst.txt
new file mode 100644
index 000000000..371770cba
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.periodicFrequentPattern.basic.rst.txt
@@ -0,0 +1,69 @@
+PAMI.periodicFrequentPattern.basic package
+==========================================
+
+Submodules
+----------
+
+PAMI.periodicFrequentPattern.basic.PFECLAT module
+-------------------------------------------------
+
+.. automodule:: PAMI.periodicFrequentPattern.basic.PFECLAT
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.periodicFrequentPattern.basic.PFPGrowth module
+---------------------------------------------------
+
+.. automodule:: PAMI.periodicFrequentPattern.basic.PFPGrowth
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.periodicFrequentPattern.basic.PFPGrowthPlus module
+-------------------------------------------------------
+
+.. automodule:: PAMI.periodicFrequentPattern.basic.PFPGrowthPlus
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.periodicFrequentPattern.basic.PFPMC module
+-----------------------------------------------
+
+.. automodule:: PAMI.periodicFrequentPattern.basic.PFPMC
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.periodicFrequentPattern.basic.PSGrowth module
+--------------------------------------------------
+
+.. automodule:: PAMI.periodicFrequentPattern.basic.PSGrowth
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.periodicFrequentPattern.basic.abstract module
+--------------------------------------------------
+
+.. automodule:: PAMI.periodicFrequentPattern.basic.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.periodicFrequentPattern.basic.parallelPFPGrowth module
+-----------------------------------------------------------
+
+.. automodule:: PAMI.periodicFrequentPattern.basic.parallelPFPGrowth
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.periodicFrequentPattern.basic
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.periodicFrequentPattern.closed.rst.txt b/sphinx/_build/html/_sources/PAMI.periodicFrequentPattern.closed.rst.txt
new file mode 100644
index 000000000..3a60f14a9
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.periodicFrequentPattern.closed.rst.txt
@@ -0,0 +1,29 @@
+PAMI.periodicFrequentPattern.closed package
+===========================================
+
+Submodules
+----------
+
+PAMI.periodicFrequentPattern.closed.CPFPMiner module
+----------------------------------------------------
+
+.. automodule:: PAMI.periodicFrequentPattern.closed.CPFPMiner
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.periodicFrequentPattern.closed.abstract module
+---------------------------------------------------
+
+.. automodule:: PAMI.periodicFrequentPattern.closed.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.periodicFrequentPattern.closed
+   :members:
+   :undoc-members:
+   :show-inheritance:
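The periodic-frequent miners above take a support threshold plus a maximum periodicity; a minimal PFPGrowth driver (values are placeholders):

    from PAMI.periodicFrequentPattern.basic import PFPGrowth as alg

    obj = alg.PFPGrowth(iFile='temporal.csv', minSup=400, maxPer=10)
    obj.mine()
    obj.save('periodicFrequentPatterns.txt')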
diff --git a/sphinx/_build/html/_sources/PAMI.periodicFrequentPattern.cuda.rst.txt b/sphinx/_build/html/_sources/PAMI.periodicFrequentPattern.cuda.rst.txt
new file mode 100644
index 000000000..4d79863ca
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.periodicFrequentPattern.cuda.rst.txt
@@ -0,0 +1,37 @@
+PAMI.periodicFrequentPattern.cuda package
+=========================================
+
+Submodules
+----------
+
+PAMI.periodicFrequentPattern.cuda.abstract module
+-------------------------------------------------
+
+.. automodule:: PAMI.periodicFrequentPattern.cuda.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.periodicFrequentPattern.cuda.cuGPFMiner module
+---------------------------------------------------
+
+.. automodule:: PAMI.periodicFrequentPattern.cuda.cuGPFMiner
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.periodicFrequentPattern.cuda.gPFMinerBit module
+----------------------------------------------------
+
+.. automodule:: PAMI.periodicFrequentPattern.cuda.gPFMinerBit
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.periodicFrequentPattern.cuda
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.periodicFrequentPattern.maximal.rst.txt b/sphinx/_build/html/_sources/PAMI.periodicFrequentPattern.maximal.rst.txt
new file mode 100644
index 000000000..d0f2c3c7d
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.periodicFrequentPattern.maximal.rst.txt
@@ -0,0 +1,29 @@
+PAMI.periodicFrequentPattern.maximal package
+============================================
+
+Submodules
+----------
+
+PAMI.periodicFrequentPattern.maximal.MaxPFGrowth module
+-------------------------------------------------------
+
+.. automodule:: PAMI.periodicFrequentPattern.maximal.MaxPFGrowth
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.periodicFrequentPattern.maximal.abstract module
+----------------------------------------------------
+
+.. automodule:: PAMI.periodicFrequentPattern.maximal.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.periodicFrequentPattern.maximal
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.periodicFrequentPattern.pyspark.rst.txt b/sphinx/_build/html/_sources/PAMI.periodicFrequentPattern.pyspark.rst.txt
new file mode 100644
index 000000000..2010c5a2d
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.periodicFrequentPattern.pyspark.rst.txt
@@ -0,0 +1,29 @@
+PAMI.periodicFrequentPattern.pyspark package
+============================================
+
+Submodules
+----------
+
+PAMI.periodicFrequentPattern.pyspark.abstract module
+----------------------------------------------------
+
+.. automodule:: PAMI.periodicFrequentPattern.pyspark.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.periodicFrequentPattern.pyspark.parallelPFPGrowth module
+-------------------------------------------------------------
+
+.. automodule:: PAMI.periodicFrequentPattern.pyspark.parallelPFPGrowth
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.periodicFrequentPattern.pyspark
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.periodicFrequentPattern.rst.txt b/sphinx/_build/html/_sources/PAMI.periodicFrequentPattern.rst.txt
new file mode 100644
index 000000000..6b38765f5
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.periodicFrequentPattern.rst.txt
@@ -0,0 +1,23 @@
+PAMI.periodicFrequentPattern package
+====================================
+
+Subpackages
+-----------
+
+.. toctree::
+   :maxdepth: 4
+
+   PAMI.periodicFrequentPattern.basic
+   PAMI.periodicFrequentPattern.closed
+   PAMI.periodicFrequentPattern.cuda
+   PAMI.periodicFrequentPattern.maximal
+   PAMI.periodicFrequentPattern.pyspark
+   PAMI.periodicFrequentPattern.topk
+
+Module contents
+---------------
+
+.. automodule:: PAMI.periodicFrequentPattern
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.periodicFrequentPattern.topk.TopkPFP.rst.txt b/sphinx/_build/html/_sources/PAMI.periodicFrequentPattern.topk.TopkPFP.rst.txt
new file mode 100644
index 000000000..d7b681280
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.periodicFrequentPattern.topk.TopkPFP.rst.txt
@@ -0,0 +1,29 @@
+PAMI.periodicFrequentPattern.topk.TopkPFP package
+=================================================
+
+Submodules
+----------
+
+PAMI.periodicFrequentPattern.topk.TopkPFP.TopkPFP module
+--------------------------------------------------------
+
+.. automodule:: PAMI.periodicFrequentPattern.topk.TopkPFP.TopkPFP
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.periodicFrequentPattern.topk.TopkPFP.abstract module
+---------------------------------------------------------
+
+.. automodule:: PAMI.periodicFrequentPattern.topk.TopkPFP.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.periodicFrequentPattern.topk.TopkPFP
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.periodicFrequentPattern.topk.kPFPMiner.rst.txt b/sphinx/_build/html/_sources/PAMI.periodicFrequentPattern.topk.kPFPMiner.rst.txt
new file mode 100644
index 000000000..44ff2c51c
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.periodicFrequentPattern.topk.kPFPMiner.rst.txt
@@ -0,0 +1,29 @@
+PAMI.periodicFrequentPattern.topk.kPFPMiner package
+===================================================
+
+Submodules
+----------
+
+PAMI.periodicFrequentPattern.topk.kPFPMiner.abstract module
+-----------------------------------------------------------
+
+.. automodule:: PAMI.periodicFrequentPattern.topk.kPFPMiner.abstract
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+PAMI.periodicFrequentPattern.topk.kPFPMiner.kPFPMiner module
+------------------------------------------------------------
+
+.. automodule:: PAMI.periodicFrequentPattern.topk.kPFPMiner.kPFPMiner
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: PAMI.periodicFrequentPattern.topk.kPFPMiner
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/sphinx/_build/html/_sources/PAMI.periodicFrequentPattern.topk.rst.txt b/sphinx/_build/html/_sources/PAMI.periodicFrequentPattern.topk.rst.txt
new file mode 100644
index 000000000..347ff3db8
--- /dev/null
+++ b/sphinx/_build/html/_sources/PAMI.periodicFrequentPattern.topk.rst.txt
@@ -0,0 +1,19 @@
+PAMI.periodicFrequentPattern.topk package
+=========================================
+
+Subpackages
+-----------
+
+.. toctree::
+   :maxdepth: 4
+
+   PAMI.periodicFrequentPattern.topk.TopkPFP
+   PAMI.periodicFrequentPattern.topk.kPFPMiner
+
+Module contents
+---------------
+
+.. automodule:: PAMI.periodicFrequentPattern.topk
+   :members:
+   :undoc-members:
+   :show-inheritance:
automodule:: PAMI.recurringPattern.basic.RPGrowth + :members: + :undoc-members: + :show-inheritance: + +PAMI.recurringPattern.basic.abstract module +------------------------------------------- + +.. automodule:: PAMI.recurringPattern.basic.abstract + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: PAMI.recurringPattern.basic + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.recurringPattern.rst.txt b/sphinx/_build/html/_sources/PAMI.recurringPattern.rst.txt new file mode 100644 index 000000000..1585799d1 --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.recurringPattern.rst.txt @@ -0,0 +1,18 @@ +PAMI.recurringPattern package +============================= + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + PAMI.recurringPattern.basic + +Module contents +--------------- + +.. automodule:: PAMI.recurringPattern + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.relativeFrequentPattern.basic.rst.txt b/sphinx/_build/html/_sources/PAMI.relativeFrequentPattern.basic.rst.txt new file mode 100644 index 000000000..91f20ded0 --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.relativeFrequentPattern.basic.rst.txt @@ -0,0 +1,29 @@ +PAMI.relativeFrequentPattern.basic package +========================================== + +Submodules +---------- + +PAMI.relativeFrequentPattern.basic.RSFPGrowth module +---------------------------------------------------- + +.. automodule:: PAMI.relativeFrequentPattern.basic.RSFPGrowth + :members: + :undoc-members: + :show-inheritance: + +PAMI.relativeFrequentPattern.basic.abstract module +-------------------------------------------------- + +.. automodule:: PAMI.relativeFrequentPattern.basic.abstract + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: PAMI.relativeFrequentPattern.basic + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.relativeFrequentPattern.rst.txt b/sphinx/_build/html/_sources/PAMI.relativeFrequentPattern.rst.txt new file mode 100644 index 000000000..f8f50d54a --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.relativeFrequentPattern.rst.txt @@ -0,0 +1,18 @@ +PAMI.relativeFrequentPattern package +==================================== + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + PAMI.relativeFrequentPattern.basic + +Module contents +--------------- + +.. automodule:: PAMI.relativeFrequentPattern + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.relativeHighUtilityPattern.basic.rst.txt b/sphinx/_build/html/_sources/PAMI.relativeHighUtilityPattern.basic.rst.txt new file mode 100644 index 000000000..32f1fa0a4 --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.relativeHighUtilityPattern.basic.rst.txt @@ -0,0 +1,29 @@ +PAMI.relativeHighUtilityPattern.basic package +============================================= + +Submodules +---------- + +PAMI.relativeHighUtilityPattern.basic.RHUIM module +-------------------------------------------------- + +.. automodule:: PAMI.relativeHighUtilityPattern.basic.RHUIM + :members: + :undoc-members: + :show-inheritance: + +PAMI.relativeHighUtilityPattern.basic.abstract module +----------------------------------------------------- + +.. automodule:: PAMI.relativeHighUtilityPattern.basic.abstract + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: PAMI.relativeHighUtilityPattern.basic + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.relativeHighUtilityPattern.rst.txt b/sphinx/_build/html/_sources/PAMI.relativeHighUtilityPattern.rst.txt new file mode 100644 index 000000000..341fa59fe --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.relativeHighUtilityPattern.rst.txt @@ -0,0 +1,18 @@ +PAMI.relativeHighUtilityPattern package +======================================= + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + PAMI.relativeHighUtilityPattern.basic + +Module contents +--------------- + +.. automodule:: PAMI.relativeHighUtilityPattern + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.rst.txt b/sphinx/_build/html/_sources/PAMI.rst.txt new file mode 100644 index 000000000..10e40dba3 --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.rst.txt @@ -0,0 +1,60 @@ +PAMI package +============ + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + PAMI.AssociationRules + PAMI.correlatedPattern + PAMI.coveragePattern + PAMI.extras + PAMI.faultTolerantFrequentPattern + PAMI.frequentPattern + PAMI.fuzzyCorrelatedPattern + PAMI.fuzzyFrequentPattern + PAMI.fuzzyGeoreferencedFrequentPattern + PAMI.fuzzyGeoreferencedPeriodicFrequentPattern + PAMI.fuzzyPartialPeriodicPatterns + PAMI.fuzzyPeriodicFrequentPattern + PAMI.geoReferencedPeriodicFrequentPattern + PAMI.georeferencedFrequentPattern + PAMI.georeferencedFrequentSequencePattern + PAMI.georeferencedPartialPeriodicPattern + PAMI.highUtilityFrequentPattern + PAMI.highUtilityGeoreferencedFrequentPattern + PAMI.highUtilityPattern + PAMI.highUtilityPatternsInStreams + PAMI.highUtilitySpatialPattern + PAMI.localPeriodicPattern + PAMI.multipleMinimumSupportBasedFrequentPattern + PAMI.partialPeriodicFrequentPattern + PAMI.partialPeriodicPattern + PAMI.partialPeriodicPatternInMultipleTimeSeries + PAMI.periodicCorrelatedPattern + PAMI.periodicFrequentPattern + PAMI.recurringPattern + PAMI.relativeFrequentPattern + PAMI.relativeHighUtilityPattern + PAMI.sequence + PAMI.sequentialPatternMining + PAMI.stablePeriodicFrequentPattern + PAMI.subgraphMining + PAMI.uncertainFaultTolerantFrequentPattern + PAMI.uncertainFrequentPattern + PAMI.uncertainGeoreferencedFrequentPattern + PAMI.uncertainPeriodicFrequentPattern + PAMI.weightedFrequentNeighbourhoodPattern + PAMI.weightedFrequentPattern + PAMI.weightedFrequentRegularPattern + PAMI.weightedUncertainFrequentPattern + +Module contents +--------------- + +.. automodule:: PAMI + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.sequence.rst.txt b/sphinx/_build/html/_sources/PAMI.sequence.rst.txt new file mode 100644 index 000000000..e527a5794 --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.sequence.rst.txt @@ -0,0 +1,10 @@ +PAMI.sequence package +===================== + +Module contents +--------------- + +.. 
automodule:: PAMI.sequence + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.sequentialPatternMining.basic.rst.txt b/sphinx/_build/html/_sources/PAMI.sequentialPatternMining.basic.rst.txt new file mode 100644 index 000000000..f7751ea01 --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.sequentialPatternMining.basic.rst.txt @@ -0,0 +1,45 @@ +PAMI.sequentialPatternMining.basic package +========================================== + +Submodules +---------- + +PAMI.sequentialPatternMining.basic.SPADE module +----------------------------------------------- + +.. automodule:: PAMI.sequentialPatternMining.basic.SPADE + :members: + :undoc-members: + :show-inheritance: + +PAMI.sequentialPatternMining.basic.SPAM module +---------------------------------------------- + +.. automodule:: PAMI.sequentialPatternMining.basic.SPAM + :members: + :undoc-members: + :show-inheritance: + +PAMI.sequentialPatternMining.basic.abstract module +-------------------------------------------------- + +.. automodule:: PAMI.sequentialPatternMining.basic.abstract + :members: + :undoc-members: + :show-inheritance: + +PAMI.sequentialPatternMining.basic.prefixSpan module +---------------------------------------------------- + +.. automodule:: PAMI.sequentialPatternMining.basic.prefixSpan + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: PAMI.sequentialPatternMining.basic + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.sequentialPatternMining.closed.rst.txt b/sphinx/_build/html/_sources/PAMI.sequentialPatternMining.closed.rst.txt new file mode 100644 index 000000000..653e8f928 --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.sequentialPatternMining.closed.rst.txt @@ -0,0 +1,29 @@ +PAMI.sequentialPatternMining.closed package +=========================================== + +Submodules +---------- + +PAMI.sequentialPatternMining.closed.abstract module +--------------------------------------------------- + +.. automodule:: PAMI.sequentialPatternMining.closed.abstract + :members: + :undoc-members: + :show-inheritance: + +PAMI.sequentialPatternMining.closed.bide module +----------------------------------------------- + +.. automodule:: PAMI.sequentialPatternMining.closed.bide + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: PAMI.sequentialPatternMining.closed + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.sequentialPatternMining.rst.txt b/sphinx/_build/html/_sources/PAMI.sequentialPatternMining.rst.txt new file mode 100644 index 000000000..f00648e8b --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.sequentialPatternMining.rst.txt @@ -0,0 +1,19 @@ +PAMI.sequentialPatternMining package +==================================== + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + PAMI.sequentialPatternMining.basic + PAMI.sequentialPatternMining.closed + +Module contents +--------------- + +.. 
automodule:: PAMI.sequentialPatternMining + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.stablePeriodicFrequentPattern.basic.rst.txt b/sphinx/_build/html/_sources/PAMI.stablePeriodicFrequentPattern.basic.rst.txt new file mode 100644 index 000000000..e614de089 --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.stablePeriodicFrequentPattern.basic.rst.txt @@ -0,0 +1,45 @@ +PAMI.stablePeriodicFrequentPattern.basic package +================================================ + +Submodules +---------- + +PAMI.stablePeriodicFrequentPattern.basic.SPPEclat module +-------------------------------------------------------- + +.. automodule:: PAMI.stablePeriodicFrequentPattern.basic.SPPEclat + :members: + :undoc-members: + :show-inheritance: + +PAMI.stablePeriodicFrequentPattern.basic.SPPGrowth module +--------------------------------------------------------- + +.. automodule:: PAMI.stablePeriodicFrequentPattern.basic.SPPGrowth + :members: + :undoc-members: + :show-inheritance: + +PAMI.stablePeriodicFrequentPattern.basic.SPPGrowthDump module +------------------------------------------------------------- + +.. automodule:: PAMI.stablePeriodicFrequentPattern.basic.SPPGrowthDump + :members: + :undoc-members: + :show-inheritance: + +PAMI.stablePeriodicFrequentPattern.basic.abstract module +-------------------------------------------------------- + +.. automodule:: PAMI.stablePeriodicFrequentPattern.basic.abstract + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: PAMI.stablePeriodicFrequentPattern.basic + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.stablePeriodicFrequentPattern.rst.txt b/sphinx/_build/html/_sources/PAMI.stablePeriodicFrequentPattern.rst.txt new file mode 100644 index 000000000..665bcd223 --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.stablePeriodicFrequentPattern.rst.txt @@ -0,0 +1,19 @@ +PAMI.stablePeriodicFrequentPattern package +========================================== + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + PAMI.stablePeriodicFrequentPattern.basic + PAMI.stablePeriodicFrequentPattern.topK + +Module contents +--------------- + +.. automodule:: PAMI.stablePeriodicFrequentPattern + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.stablePeriodicFrequentPattern.topK.rst.txt b/sphinx/_build/html/_sources/PAMI.stablePeriodicFrequentPattern.topK.rst.txt new file mode 100644 index 000000000..0f274222c --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.stablePeriodicFrequentPattern.topK.rst.txt @@ -0,0 +1,29 @@ +PAMI.stablePeriodicFrequentPattern.topK package +=============================================== + +Submodules +---------- + +PAMI.stablePeriodicFrequentPattern.topK.TSPIN module +---------------------------------------------------- + +.. automodule:: PAMI.stablePeriodicFrequentPattern.topK.TSPIN + :members: + :undoc-members: + :show-inheritance: + +PAMI.stablePeriodicFrequentPattern.topK.abstract module +------------------------------------------------------- + +.. automodule:: PAMI.stablePeriodicFrequentPattern.topK.abstract + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: PAMI.stablePeriodicFrequentPattern.topK + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.subgraphMining.basic.rst.txt b/sphinx/_build/html/_sources/PAMI.subgraphMining.basic.rst.txt new file mode 100644 index 000000000..fe4d33a65 --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.subgraphMining.basic.rst.txt @@ -0,0 +1,85 @@ +PAMI.subgraphMining.basic package +================================= + +Submodules +---------- + +PAMI.subgraphMining.basic.abstract module +----------------------------------------- + +.. automodule:: PAMI.subgraphMining.basic.abstract + :members: + :undoc-members: + :show-inheritance: + +PAMI.subgraphMining.basic.dfsCode module +---------------------------------------- + +.. automodule:: PAMI.subgraphMining.basic.dfsCode + :members: + :undoc-members: + :show-inheritance: + +PAMI.subgraphMining.basic.edge module +------------------------------------- + +.. automodule:: PAMI.subgraphMining.basic.edge + :members: + :undoc-members: + :show-inheritance: + +PAMI.subgraphMining.basic.extendedEdge module +--------------------------------------------- + +.. automodule:: PAMI.subgraphMining.basic.extendedEdge + :members: + :undoc-members: + :show-inheritance: + +PAMI.subgraphMining.basic.frequentSubgraph module +------------------------------------------------- + +.. automodule:: PAMI.subgraphMining.basic.frequentSubgraph + :members: + :undoc-members: + :show-inheritance: + +PAMI.subgraphMining.basic.graph module +-------------------------------------- + +.. automodule:: PAMI.subgraphMining.basic.graph + :members: + :undoc-members: + :show-inheritance: + +PAMI.subgraphMining.basic.gspan module +-------------------------------------- + +.. automodule:: PAMI.subgraphMining.basic.gspan + :members: + :undoc-members: + :show-inheritance: + +PAMI.subgraphMining.basic.sparseTriangularMatrix module +------------------------------------------------------- + +.. automodule:: PAMI.subgraphMining.basic.sparseTriangularMatrix + :members: + :undoc-members: + :show-inheritance: + +PAMI.subgraphMining.basic.vertex module +--------------------------------------- + +.. automodule:: PAMI.subgraphMining.basic.vertex + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: PAMI.subgraphMining.basic + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.subgraphMining.rst.txt b/sphinx/_build/html/_sources/PAMI.subgraphMining.rst.txt new file mode 100644 index 000000000..966149734 --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.subgraphMining.rst.txt @@ -0,0 +1,19 @@ +PAMI.subgraphMining package +=========================== + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + PAMI.subgraphMining.basic + PAMI.subgraphMining.topK + +Module contents +--------------- + +.. automodule:: PAMI.subgraphMining + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.subgraphMining.topK.rst.txt b/sphinx/_build/html/_sources/PAMI.subgraphMining.topK.rst.txt new file mode 100644 index 000000000..dc41aee6a --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.subgraphMining.topK.rst.txt @@ -0,0 +1,93 @@ +PAMI.subgraphMining.topK package +================================ + +Submodules +---------- + +PAMI.subgraphMining.topK.DFSCode module +--------------------------------------- + +.. 
automodule:: PAMI.subgraphMining.topK.DFSCode + :members: + :undoc-members: + :show-inheritance: + +PAMI.subgraphMining.topK.DFSThread module +----------------------------------------- + +.. automodule:: PAMI.subgraphMining.topK.DFSThread + :members: + :undoc-members: + :show-inheritance: + +PAMI.subgraphMining.topK.abstract module +---------------------------------------- + +.. automodule:: PAMI.subgraphMining.topK.abstract + :members: + :undoc-members: + :show-inheritance: + +PAMI.subgraphMining.topK.edge module +------------------------------------ + +.. automodule:: PAMI.subgraphMining.topK.edge + :members: + :undoc-members: + :show-inheritance: + +PAMI.subgraphMining.topK.extendedEdge module +-------------------------------------------- + +.. automodule:: PAMI.subgraphMining.topK.extendedEdge + :members: + :undoc-members: + :show-inheritance: + +PAMI.subgraphMining.topK.frequentSubgraph module +------------------------------------------------ + +.. automodule:: PAMI.subgraphMining.topK.frequentSubgraph + :members: + :undoc-members: + :show-inheritance: + +PAMI.subgraphMining.topK.graph module +------------------------------------- + +.. automodule:: PAMI.subgraphMining.topK.graph + :members: + :undoc-members: + :show-inheritance: + +PAMI.subgraphMining.topK.sparseTriangularMatrix module +------------------------------------------------------ + +.. automodule:: PAMI.subgraphMining.topK.sparseTriangularMatrix + :members: + :undoc-members: + :show-inheritance: + +PAMI.subgraphMining.topK.tkg module +----------------------------------- + +.. automodule:: PAMI.subgraphMining.topK.tkg + :members: + :undoc-members: + :show-inheritance: + +PAMI.subgraphMining.topK.vertex module +-------------------------------------- + +.. automodule:: PAMI.subgraphMining.topK.vertex + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: PAMI.subgraphMining.topK + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.uncertainFaultTolerantFrequentPattern.rst.txt b/sphinx/_build/html/_sources/PAMI.uncertainFaultTolerantFrequentPattern.rst.txt new file mode 100644 index 000000000..70ddb24b8 --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.uncertainFaultTolerantFrequentPattern.rst.txt @@ -0,0 +1,29 @@ +PAMI.uncertainFaultTolerantFrequentPattern package +================================================== + +Submodules +---------- + +PAMI.uncertainFaultTolerantFrequentPattern.VBFTMine module +---------------------------------------------------------- + +.. automodule:: PAMI.uncertainFaultTolerantFrequentPattern.VBFTMine + :members: + :undoc-members: + :show-inheritance: + +PAMI.uncertainFaultTolerantFrequentPattern.abstract module +---------------------------------------------------------- + +.. automodule:: PAMI.uncertainFaultTolerantFrequentPattern.abstract + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: PAMI.uncertainFaultTolerantFrequentPattern + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.uncertainFrequentPattern.basic.rst.txt b/sphinx/_build/html/_sources/PAMI.uncertainFrequentPattern.basic.rst.txt new file mode 100644 index 000000000..a38fea7c2 --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.uncertainFrequentPattern.basic.rst.txt @@ -0,0 +1,77 @@ +PAMI.uncertainFrequentPattern.basic package +=========================================== + +Submodules +---------- + +PAMI.uncertainFrequentPattern.basic.CUFPTree module +--------------------------------------------------- + +.. automodule:: PAMI.uncertainFrequentPattern.basic.CUFPTree + :members: + :undoc-members: + :show-inheritance: + +PAMI.uncertainFrequentPattern.basic.PUFGrowth module +---------------------------------------------------- + +.. automodule:: PAMI.uncertainFrequentPattern.basic.PUFGrowth + :members: + :undoc-members: + :show-inheritance: + +PAMI.uncertainFrequentPattern.basic.TUFP module +----------------------------------------------- + +.. automodule:: PAMI.uncertainFrequentPattern.basic.TUFP + :members: + :undoc-members: + :show-inheritance: + +PAMI.uncertainFrequentPattern.basic.TubeP module +------------------------------------------------ + +.. automodule:: PAMI.uncertainFrequentPattern.basic.TubeP + :members: + :undoc-members: + :show-inheritance: + +PAMI.uncertainFrequentPattern.basic.TubeS module +------------------------------------------------ + +.. automodule:: PAMI.uncertainFrequentPattern.basic.TubeS + :members: + :undoc-members: + :show-inheritance: + +PAMI.uncertainFrequentPattern.basic.UFGrowth module +--------------------------------------------------- + +.. automodule:: PAMI.uncertainFrequentPattern.basic.UFGrowth + :members: + :undoc-members: + :show-inheritance: + +PAMI.uncertainFrequentPattern.basic.UVECLAT module +-------------------------------------------------- + +.. automodule:: PAMI.uncertainFrequentPattern.basic.UVECLAT + :members: + :undoc-members: + :show-inheritance: + +PAMI.uncertainFrequentPattern.basic.abstract module +--------------------------------------------------- + +.. automodule:: PAMI.uncertainFrequentPattern.basic.abstract + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: PAMI.uncertainFrequentPattern.basic + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.uncertainFrequentPattern.rst.txt b/sphinx/_build/html/_sources/PAMI.uncertainFrequentPattern.rst.txt new file mode 100644 index 000000000..f66db7ddb --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.uncertainFrequentPattern.rst.txt @@ -0,0 +1,18 @@ +PAMI.uncertainFrequentPattern package +===================================== + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + PAMI.uncertainFrequentPattern.basic + +Module contents +--------------- + +.. 
automodule:: PAMI.uncertainFrequentPattern + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.uncertainGeoreferencedFrequentPattern.basic.rst.txt b/sphinx/_build/html/_sources/PAMI.uncertainGeoreferencedFrequentPattern.basic.rst.txt new file mode 100644 index 000000000..3df0cda70 --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.uncertainGeoreferencedFrequentPattern.basic.rst.txt @@ -0,0 +1,29 @@ +PAMI.uncertainGeoreferencedFrequentPattern.basic package +======================================================== + +Submodules +---------- + +PAMI.uncertainGeoreferencedFrequentPattern.basic.GFPGrowth module +----------------------------------------------------------------- + +.. automodule:: PAMI.uncertainGeoreferencedFrequentPattern.basic.GFPGrowth + :members: + :undoc-members: + :show-inheritance: + +PAMI.uncertainGeoreferencedFrequentPattern.basic.abstract module +---------------------------------------------------------------- + +.. automodule:: PAMI.uncertainGeoreferencedFrequentPattern.basic.abstract + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: PAMI.uncertainGeoreferencedFrequentPattern.basic + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.uncertainGeoreferencedFrequentPattern.rst.txt b/sphinx/_build/html/_sources/PAMI.uncertainGeoreferencedFrequentPattern.rst.txt new file mode 100644 index 000000000..95b7889f5 --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.uncertainGeoreferencedFrequentPattern.rst.txt @@ -0,0 +1,18 @@ +PAMI.uncertainGeoreferencedFrequentPattern package +================================================== + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + PAMI.uncertainGeoreferencedFrequentPattern.basic + +Module contents +--------------- + +.. automodule:: PAMI.uncertainGeoreferencedFrequentPattern + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.uncertainPeriodicFrequentPattern.basic.rst.txt b/sphinx/_build/html/_sources/PAMI.uncertainPeriodicFrequentPattern.basic.rst.txt new file mode 100644 index 000000000..43d1c05f1 --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.uncertainPeriodicFrequentPattern.basic.rst.txt @@ -0,0 +1,37 @@ +PAMI.uncertainPeriodicFrequentPattern.basic package +=================================================== + +Submodules +---------- + +PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowth module +------------------------------------------------------------- + +.. automodule:: PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowth + :members: + :undoc-members: + :show-inheritance: + +PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowthPlus module +----------------------------------------------------------------- + +.. automodule:: PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowthPlus + :members: + :undoc-members: + :show-inheritance: + +PAMI.uncertainPeriodicFrequentPattern.basic.abstract module +----------------------------------------------------------- + +.. automodule:: PAMI.uncertainPeriodicFrequentPattern.basic.abstract + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: PAMI.uncertainPeriodicFrequentPattern.basic + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.uncertainPeriodicFrequentPattern.rst.txt b/sphinx/_build/html/_sources/PAMI.uncertainPeriodicFrequentPattern.rst.txt new file mode 100644 index 000000000..930a02095 --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.uncertainPeriodicFrequentPattern.rst.txt @@ -0,0 +1,18 @@ +PAMI.uncertainPeriodicFrequentPattern package +============================================= + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + PAMI.uncertainPeriodicFrequentPattern.basic + +Module contents +--------------- + +.. automodule:: PAMI.uncertainPeriodicFrequentPattern + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.weightedFrequentNeighbourhoodPattern.basic.rst.txt b/sphinx/_build/html/_sources/PAMI.weightedFrequentNeighbourhoodPattern.basic.rst.txt new file mode 100644 index 000000000..14d155416 --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.weightedFrequentNeighbourhoodPattern.basic.rst.txt @@ -0,0 +1,29 @@ +PAMI.weightedFrequentNeighbourhoodPattern.basic package +======================================================= + +Submodules +---------- + +PAMI.weightedFrequentNeighbourhoodPattern.basic.SWFPGrowth module +----------------------------------------------------------------- + +.. automodule:: PAMI.weightedFrequentNeighbourhoodPattern.basic.SWFPGrowth + :members: + :undoc-members: + :show-inheritance: + +PAMI.weightedFrequentNeighbourhoodPattern.basic.abstract module +--------------------------------------------------------------- + +.. automodule:: PAMI.weightedFrequentNeighbourhoodPattern.basic.abstract + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: PAMI.weightedFrequentNeighbourhoodPattern.basic + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.weightedFrequentNeighbourhoodPattern.rst.txt b/sphinx/_build/html/_sources/PAMI.weightedFrequentNeighbourhoodPattern.rst.txt new file mode 100644 index 000000000..0d5992a0b --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.weightedFrequentNeighbourhoodPattern.rst.txt @@ -0,0 +1,18 @@ +PAMI.weightedFrequentNeighbourhoodPattern package +================================================= + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + PAMI.weightedFrequentNeighbourhoodPattern.basic + +Module contents +--------------- + +.. automodule:: PAMI.weightedFrequentNeighbourhoodPattern + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.weightedFrequentPattern.basic.rst.txt b/sphinx/_build/html/_sources/PAMI.weightedFrequentPattern.basic.rst.txt new file mode 100644 index 000000000..21aa835b5 --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.weightedFrequentPattern.basic.rst.txt @@ -0,0 +1,29 @@ +PAMI.weightedFrequentPattern.basic package +========================================== + +Submodules +---------- + +PAMI.weightedFrequentPattern.basic.WFIM module +---------------------------------------------- + +.. automodule:: PAMI.weightedFrequentPattern.basic.WFIM + :members: + :undoc-members: + :show-inheritance: + +PAMI.weightedFrequentPattern.basic.abstract module +-------------------------------------------------- + +.. automodule:: PAMI.weightedFrequentPattern.basic.abstract + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: PAMI.weightedFrequentPattern.basic + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.weightedFrequentPattern.rst.txt b/sphinx/_build/html/_sources/PAMI.weightedFrequentPattern.rst.txt new file mode 100644 index 000000000..9db2895f0 --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.weightedFrequentPattern.rst.txt @@ -0,0 +1,18 @@ +PAMI.weightedFrequentPattern package +==================================== + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + PAMI.weightedFrequentPattern.basic + +Module contents +--------------- + +.. automodule:: PAMI.weightedFrequentPattern + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.weightedFrequentRegularPattern.basic.rst.txt b/sphinx/_build/html/_sources/PAMI.weightedFrequentRegularPattern.basic.rst.txt new file mode 100644 index 000000000..89bba36fc --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.weightedFrequentRegularPattern.basic.rst.txt @@ -0,0 +1,29 @@ +PAMI.weightedFrequentRegularPattern.basic package +================================================= + +Submodules +---------- + +PAMI.weightedFrequentRegularPattern.basic.WFRIMiner module +---------------------------------------------------------- + +.. automodule:: PAMI.weightedFrequentRegularPattern.basic.WFRIMiner + :members: + :undoc-members: + :show-inheritance: + +PAMI.weightedFrequentRegularPattern.basic.abstract module +--------------------------------------------------------- + +.. automodule:: PAMI.weightedFrequentRegularPattern.basic.abstract + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: PAMI.weightedFrequentRegularPattern.basic + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.weightedFrequentRegularPattern.rst.txt b/sphinx/_build/html/_sources/PAMI.weightedFrequentRegularPattern.rst.txt new file mode 100644 index 000000000..08b1cfba8 --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.weightedFrequentRegularPattern.rst.txt @@ -0,0 +1,18 @@ +PAMI.weightedFrequentRegularPattern package +=========================================== + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + PAMI.weightedFrequentRegularPattern.basic + +Module contents +--------------- + +.. automodule:: PAMI.weightedFrequentRegularPattern + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.weightedUncertainFrequentPattern.basic.rst.txt b/sphinx/_build/html/_sources/PAMI.weightedUncertainFrequentPattern.basic.rst.txt new file mode 100644 index 000000000..a5bd3337b --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.weightedUncertainFrequentPattern.basic.rst.txt @@ -0,0 +1,29 @@ +PAMI.weightedUncertainFrequentPattern.basic package +=================================================== + +Submodules +---------- + +PAMI.weightedUncertainFrequentPattern.basic.WUFIM module +-------------------------------------------------------- + +.. automodule:: PAMI.weightedUncertainFrequentPattern.basic.WUFIM + :members: + :undoc-members: + :show-inheritance: + +PAMI.weightedUncertainFrequentPattern.basic.abstract module +----------------------------------------------------------- + +.. automodule:: PAMI.weightedUncertainFrequentPattern.basic.abstract + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: PAMI.weightedUncertainFrequentPattern.basic + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/PAMI.weightedUncertainFrequentPattern.rst.txt b/sphinx/_build/html/_sources/PAMI.weightedUncertainFrequentPattern.rst.txt new file mode 100644 index 000000000..8cfa6d5f4 --- /dev/null +++ b/sphinx/_build/html/_sources/PAMI.weightedUncertainFrequentPattern.rst.txt @@ -0,0 +1,18 @@ +PAMI.weightedUncertainFrequentPattern package +============================================= + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + PAMI.weightedUncertainFrequentPattern.basic + +Module contents +--------------- + +.. automodule:: PAMI.weightedUncertainFrequentPattern + :members: + :undoc-members: + :show-inheritance: diff --git a/sphinx/_build/html/_sources/index.rst.txt b/sphinx/_build/html/_sources/index.rst.txt new file mode 100644 index 000000000..1af5a2a8c --- /dev/null +++ b/sphinx/_build/html/_sources/index.rst.txt @@ -0,0 +1,20 @@ +.. PAMI documentation master file, created by + sphinx-quickstart on Tue Apr 23 11:56:25 2024. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to PAMI's documentation! +================================ + +.. toctree:: + :maxdepth: 2 + :caption: Contents: + + modules + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/sphinx/_build/html/_sources/modules.rst.txt b/sphinx/_build/html/_sources/modules.rst.txt new file mode 100644 index 000000000..5ca914e08 --- /dev/null +++ b/sphinx/_build/html/_sources/modules.rst.txt @@ -0,0 +1,7 @@ +PAMI +==== + +.. toctree:: + :maxdepth: 4 + + PAMI diff --git a/sphinx/_build/html/_static/_sphinx_javascript_frameworks_compat.js b/sphinx/_build/html/_static/_sphinx_javascript_frameworks_compat.js new file mode 100644 index 000000000..81415803e --- /dev/null +++ b/sphinx/_build/html/_static/_sphinx_javascript_frameworks_compat.js @@ -0,0 +1,123 @@ +/* Compatability shim for jQuery and underscores.js. + * + * Copyright Sphinx contributors + * Released under the two clause BSD licence + */ + +/** + * small helper function to urldecode strings + * + * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/decodeURIComponent#Decoding_query_parameters_from_a_URL + */ +jQuery.urldecode = function(x) { + if (!x) { + return x + } + return decodeURIComponent(x.replace(/\+/g, ' ')); +}; + +/** + * small helper function to urlencode strings + */ +jQuery.urlencode = encodeURIComponent; + +/** + * This function returns the parsed url parameters of the + * current request. Multiple values per key are supported, + * it will always return arrays of strings for the value parts. + */ +jQuery.getQueryParameters = function(s) { + if (typeof s === 'undefined') + s = document.location.search; + var parts = s.substr(s.indexOf('?') + 1).split('&'); + var result = {}; + for (var i = 0; i < parts.length; i++) { + var tmp = parts[i].split('=', 2); + var key = jQuery.urldecode(tmp[0]); + var value = jQuery.urldecode(tmp[1]); + if (key in result) + result[key].push(value); + else + result[key] = [value]; + } + return result; +}; + +/** + * highlight a given string on a jquery object by wrapping it in + * span elements with the given class name. 
+ */ +jQuery.fn.highlightText = function(text, className) { + function highlight(node, addItems) { + if (node.nodeType === 3) { + var val = node.nodeValue; + var pos = val.toLowerCase().indexOf(text); + if (pos >= 0 && + !jQuery(node.parentNode).hasClass(className) && + !jQuery(node.parentNode).hasClass("nohighlight")) { + var span; + var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.className = className; + } + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + node.parentNode.insertBefore(span, node.parentNode.insertBefore( + document.createTextNode(val.substr(pos + text.length)), + node.nextSibling)); + node.nodeValue = val.substr(0, pos); + if (isInSVG) { + var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); + var bbox = node.parentElement.getBBox(); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute('class', className); + addItems.push({ + "parent": node.parentNode, + "target": rect}); + } + } + } + else if (!jQuery(node).is("button, select, textarea")) { + jQuery.each(node.childNodes, function() { + highlight(this, addItems); + }); + } + } + var addItems = []; + var result = this.each(function() { + highlight(this, addItems); + }); + for (var i = 0; i < addItems.length; ++i) { + jQuery(addItems[i].parent).before(addItems[i].target); + } + return result; +}; + +/* + * backward compatibility for jQuery.browser + * This will be supported until firefox bug is fixed. + */ +if (!jQuery.browser) { + jQuery.uaMatch = function(ua) { + ua = ua.toLowerCase(); + + var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || + /(webkit)[ \/]([\w.]+)/.exec(ua) || + /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || + /(msie) ([\w.]+)/.exec(ua) || + ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || + []; + + return { + browser: match[ 1 ] || "", + version: match[ 2 ] || "0" + }; + }; + jQuery.browser = {}; + jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; +} diff --git a/sphinx/_build/html/_static/basic.css b/sphinx/_build/html/_static/basic.css new file mode 100644 index 000000000..30fee9d0f --- /dev/null +++ b/sphinx/_build/html/_static/basic.css @@ -0,0 +1,925 @@ +/* + * basic.css + * ~~~~~~~~~ + * + * Sphinx stylesheet -- basic theme. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. 
+ * + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +div.section::after { + display: block; + content: ''; + clear: left; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 230px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox form.search { + overflow: hidden; +} + +div.sphinxsidebar #searchbox input[type="text"] { + float: left; + width: 80%; + padding: 0.25em; + box-sizing: border-box; +} + +div.sphinxsidebar #searchbox input[type="submit"] { + float: left; + width: 20%; + border-left: none; + padding: 0.25em; + box-sizing: border-box; +} + + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li p.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; + margin-left: auto; + margin-right: auto; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable ul { + margin-top: 0; + margin-bottom: 0; + list-style-type: none; +} + +table.indextable > tbody > tr > td > ul { + padding-left: 0em; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- domain module index --------------------------------------------------- */ + +table.modindextable td { + padding: 2px; + border-collapse: collapse; +} + +/* -- general body styles --------------------------------------------------- */ + 
+div.body { + min-width: 360px; + max-width: 800px; +} + +div.body p, div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + -ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} + +a:visited { + color: #551A8B; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, figure.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, figure.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, figure.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +img.align-default, figure.align-default, .figure.align-default { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-default { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar, +aside.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px; + background-color: #ffe; + width: 40%; + float: right; + clear: right; + overflow-x: auto; +} + +p.sidebar-title { + font-weight: bold; +} + +nav.contents, +aside.topic, +div.admonition, div.topic, blockquote { + clear: left; +} + +/* -- topics ---------------------------------------------------------------- */ + +nav.contents, +aside.topic, +div.topic { + border: 1px solid #ccc; + padding: 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- content of sidebars/topics/admonitions -------------------------------- */ + +div.sidebar > :last-child, +aside.sidebar > :last-child, +nav.contents > :last-child, +aside.topic > :last-child, +div.topic > :last-child, +div.admonition > :last-child { + margin-bottom: 0; +} + +div.sidebar::after, +aside.sidebar::after, +nav.contents::after, +aside.topic::after, +div.topic::after, +div.admonition::after, +blockquote::after { + display: block; + content: ''; + clear: both; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + margin-top: 10px; + margin-bottom: 10px; + border: 0; + border-collapse: collapse; +} + +table.align-center { + margin-left: auto; + margin-right: auto; +} + +table.align-default { + margin-left: auto; + margin-right: auto; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 
8px 1px 5px; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +th > :first-child, +td > :first-child { + margin-top: 0px; +} + +th > :last-child, +td > :last-child { + margin-bottom: 0px; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure, figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption, figcaption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number, +figcaption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text, +figcaption span.caption-text { +} + +/* -- field list styles ----------------------------------------------------- */ + +table.field-list td, table.field-list th { + border: 0 !important; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +/* -- hlist styles ---------------------------------------------------------- */ + +table.hlist { + margin: 1em 0; +} + +table.hlist td { + vertical-align: top; +} + +/* -- object description styles --------------------------------------------- */ + +.sig { + font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; +} + +.sig-name, code.descname { + background-color: transparent; + font-weight: bold; +} + +.sig-name { + font-size: 1.1em; +} + +code.descname { + font-size: 1.2em; +} + +.sig-prename, code.descclassname { + background-color: transparent; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.sig-param.n { + font-style: italic; +} + +/* C++ specific styling */ + +.sig-inline.c-texpr, +.sig-inline.cpp-texpr { + font-family: unset; +} + +.sig.c .k, .sig.c .kt, +.sig.cpp .k, .sig.cpp .kt { + color: #0033B3; +} + +.sig.c .m, +.sig.cpp .m { + color: #1750EB; +} + +.sig.c .s, .sig.c .sc, +.sig.cpp .s, .sig.cpp .sc { + color: #067D17; +} + + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +:not(li) > ol > li:first-child > :first-child, +:not(li) > ul > li:first-child > :first-child { + margin-top: 0px; +} + +:not(li) > ol > li:last-child > :last-child, +:not(li) > ul > li:last-child > :last-child { + margin-bottom: 0px; +} + +ol.simple ol p, +ol.simple ul p, +ul.simple ol p, +ul.simple ul p { + margin-top: 0; +} + +ol.simple > li:not(:first-child) > p, +ul.simple > li:not(:first-child) > p { + margin-top: 0; +} + +ol.simple p, +ul.simple p { + margin-bottom: 0; +} + +aside.footnote > span, +div.citation > span { + float: left; +} +aside.footnote > span:last-of-type, +div.citation > span:last-of-type { + padding-right: 0.5em; +} +aside.footnote > p { + margin-left: 2em; +} +div.citation > p { + margin-left: 4em; +} +aside.footnote > p:last-of-type, +div.citation > p:last-of-type { + margin-bottom: 0em; +} +aside.footnote > p:last-of-type:after, +div.citation > p:last-of-type:after { + content: ""; + clear: both; +} + +dl.field-list { + display: grid; + grid-template-columns: fit-content(30%) auto; +} + 
+dl.field-list > dt { + font-weight: bold; + word-break: break-word; + padding-left: 0.5em; + padding-right: 5px; +} + +dl.field-list > dd { + padding-left: 0.5em; + margin-top: 0em; + margin-left: 0em; + margin-bottom: 0em; +} + +dl { + margin-bottom: 15px; +} + +dd > :first-child { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +.sig dd { + margin-top: 0px; + margin-bottom: 0px; +} + +.sig dl { + margin-top: 0px; + margin-bottom: 0px; +} + +dl > dd:last-child, +dl > dd:last-child > :last-child { + margin-bottom: 0; +} + +dt:target, span.highlighted { + background-color: #fbe54e; +} + +rect.highlighted { + fill: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +.classifier:before { + font-style: normal; + margin: 0 0.5em; + content: ":"; + display: inline-block; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +.translated { + background-color: rgba(207, 255, 207, 0.2) +} + +.untranslated { + background-color: rgba(255, 207, 207, 0.2) +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +pre, div[class*="highlight-"] { + clear: both; +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; + white-space: nowrap; +} + +div[class*="highlight-"] { + margin: 1em 0; +} + +td.linenos pre { + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + display: block; +} + +table.highlighttable tbody { + display: block; +} + +table.highlighttable tr { + display: flex; +} + +table.highlighttable td { + margin: 0; + padding: 0; +} + +table.highlighttable td.linenos { + padding-right: 0.5em; +} + +table.highlighttable td.code { + flex: 1; + overflow: hidden; +} + +.highlight .hll { + display: block; +} + +div.highlight pre, +table.highlighttable pre { + margin: 0; +} + +div.code-block-caption + div { + margin-top: 0; +} + +div.code-block-caption { + margin-top: 1em; + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +table.highlighttable td.linenos, +span.linenos, +div.highlight span.gp { /* gp: Generic.Prompt */ + user-select: none; + -webkit-user-select: text; /* Safari fallback only */ + -webkit-user-select: none; /* Chrome/Safari */ + -moz-user-select: none; /* Firefox */ + -ms-user-select: none; /* IE10+ */ +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + margin: 1em 0; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} 
+ +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +span.eqno a.headerlink { + position: absolute; + z-index: 1; +} + +div.math:hover a.headerlink { + visibility: visible; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} \ No newline at end of file diff --git a/sphinx/_build/html/_static/css/badge_only.css b/sphinx/_build/html/_static/css/badge_only.css new file mode 100644 index 000000000..c718cee44 --- /dev/null +++ b/sphinx/_build/html/_static/css/badge_only.css @@ -0,0 +1 @@ +.clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}@font-face{font-family:FontAwesome;font-style:normal;font-weight:400;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix) format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#FontAwesome) format("svg")}.fa:before{font-family:FontAwesome;font-style:normal;font-weight:400;line-height:1}.fa:before,a .fa{text-decoration:inherit}.fa:before,a .fa,li .fa{display:inline-block}li .fa-large:before{width:1.875em}ul.fas{list-style-type:none;margin-left:2em;text-indent:-.8em}ul.fas li .fa{width:.8em}ul.fas li .fa-large:before{vertical-align:baseline}.fa-book:before,.icon-book:before{content:"\f02d"}.fa-caret-down:before,.icon-caret-down:before{content:"\f0d7"}.fa-caret-up:before,.icon-caret-up:before{content:"\f0d8"}.fa-caret-left:before,.icon-caret-left:before{content:"\f0d9"}.fa-caret-right:before,.icon-caret-right:before{content:"\f0da"}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions a{color:#2980b9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27ae60}.rst-versions .rst-current-version:after{clear:both;content:"";display:block}.rst-versions .rst-current-version .fa{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd 
a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}} \ No newline at end of file diff --git a/sphinx/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff b/sphinx/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff new file mode 100644 index 000000000..6cb600001 Binary files /dev/null and b/sphinx/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff differ diff --git a/sphinx/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff2 b/sphinx/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff2 new file mode 100644 index 000000000..7059e2314 Binary files /dev/null and b/sphinx/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff2 differ diff --git a/sphinx/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff b/sphinx/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff new file mode 100644 index 000000000..f815f63f9 Binary files /dev/null and b/sphinx/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff differ diff --git a/sphinx/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff2 b/sphinx/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff2 new file mode 100644 index 000000000..f2c76e5bd Binary files /dev/null and b/sphinx/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff2 differ diff --git a/sphinx/_build/html/_static/css/fonts/fontawesome-webfont.eot b/sphinx/_build/html/_static/css/fonts/fontawesome-webfont.eot new file mode 100644 index 000000000..e9f60ca95 Binary files /dev/null and b/sphinx/_build/html/_static/css/fonts/fontawesome-webfont.eot differ diff --git a/sphinx/_build/html/_static/css/fonts/fontawesome-webfont.svg b/sphinx/_build/html/_static/css/fonts/fontawesome-webfont.svg new file mode 100644 index 000000000..855c845e5 --- /dev/null +++ b/sphinx/_build/html/_static/css/fonts/fontawesome-webfont.svg @@ -0,0 +1,2671 @@ + + + + +Created by FontForge 20120731 at Mon Oct 24 17:37:40 2016 + By ,,, +Copyright Dave Gandy 2016. All rights reserved. 
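The SVG font added here is the last-resort entry in the multi-format @font-face chains that badge_only.css (above) and theme.css (below) declare for FontAwesome. A browser walks the src list and takes the first format it supports, so woff2 covers modern browsers while eot, ttf, and svg cover legacy ones; the query-string hashes (e.g. ?674f50d2...) version the URLs so stale cached copies are not reused after an update. A minimal sketch of the same pattern, with an illustrative font name and file paths rather than the theme's real ones:

@font-face {
    font-family: "IconFont";  /* hypothetical name, for illustration only */
    font-style: normal;
    font-weight: 400;
    src: url(fonts/iconfont.eot?v1#iefix) format("embedded-opentype"),  /* IE6-8 */
         url(fonts/iconfont.woff2?v1) format("woff2"),                  /* modern browsers */
         url(fonts/iconfont.woff?v1) format("woff"),
         url(fonts/iconfont.ttf?v1) format("truetype"),
         url(fonts/iconfont.svg?v1#IconFont) format("svg");             /* last resort, e.g. old iOS */
}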
[fontawesome-webfont.svg: the remaining lines of this 2,671-line hunk are SVG glyph outline data, omitted here]
diff --git a/sphinx/_build/html/_static/css/fonts/fontawesome-webfont.ttf b/sphinx/_build/html/_static/css/fonts/fontawesome-webfont.ttf
new file mode 100644
index 000000000..35acda2fa
Binary files /dev/null and b/sphinx/_build/html/_static/css/fonts/fontawesome-webfont.ttf differ
diff --git a/sphinx/_build/html/_static/css/fonts/fontawesome-webfont.woff b/sphinx/_build/html/_static/css/fonts/fontawesome-webfont.woff
new file mode 100644
index 000000000..400014a4b
Binary files /dev/null and b/sphinx/_build/html/_static/css/fonts/fontawesome-webfont.woff differ
diff --git a/sphinx/_build/html/_static/css/fonts/fontawesome-webfont.woff2 b/sphinx/_build/html/_static/css/fonts/fontawesome-webfont.woff2
new file mode 100644
index 000000000..4d13fc604
Binary files /dev/null and b/sphinx/_build/html/_static/css/fonts/fontawesome-webfont.woff2 differ
diff --git a/sphinx/_build/html/_static/css/fonts/lato-bold-italic.woff b/sphinx/_build/html/_static/css/fonts/lato-bold-italic.woff
new file mode 100644
index 000000000..88ad05b9f
Binary files /dev/null and b/sphinx/_build/html/_static/css/fonts/lato-bold-italic.woff differ
diff --git a/sphinx/_build/html/_static/css/fonts/lato-bold-italic.woff2 b/sphinx/_build/html/_static/css/fonts/lato-bold-italic.woff2
new file mode 100644
index 000000000..c4e3d804b
Binary files /dev/null and b/sphinx/_build/html/_static/css/fonts/lato-bold-italic.woff2 differ
diff --git a/sphinx/_build/html/_static/css/fonts/lato-bold.woff b/sphinx/_build/html/_static/css/fonts/lato-bold.woff
new file mode 100644
index 000000000..c6dff51f0
Binary files /dev/null and b/sphinx/_build/html/_static/css/fonts/lato-bold.woff differ
diff --git a/sphinx/_build/html/_static/css/fonts/lato-bold.woff2 b/sphinx/_build/html/_static/css/fonts/lato-bold.woff2
new file mode 100644
index 000000000..bb195043c
Binary files /dev/null and b/sphinx/_build/html/_static/css/fonts/lato-bold.woff2 differ
diff --git a/sphinx/_build/html/_static/css/fonts/lato-normal-italic.woff b/sphinx/_build/html/_static/css/fonts/lato-normal-italic.woff
new file mode 100644
index 000000000..76114bc03
Binary files
/dev/null and b/sphinx/_build/html/_static/css/fonts/lato-normal-italic.woff differ diff --git a/sphinx/_build/html/_static/css/fonts/lato-normal-italic.woff2 b/sphinx/_build/html/_static/css/fonts/lato-normal-italic.woff2 new file mode 100644 index 000000000..3404f37e2 Binary files /dev/null and b/sphinx/_build/html/_static/css/fonts/lato-normal-italic.woff2 differ diff --git a/sphinx/_build/html/_static/css/fonts/lato-normal.woff b/sphinx/_build/html/_static/css/fonts/lato-normal.woff new file mode 100644 index 000000000..ae1307ff5 Binary files /dev/null and b/sphinx/_build/html/_static/css/fonts/lato-normal.woff differ diff --git a/sphinx/_build/html/_static/css/fonts/lato-normal.woff2 b/sphinx/_build/html/_static/css/fonts/lato-normal.woff2 new file mode 100644 index 000000000..3bf984332 Binary files /dev/null and b/sphinx/_build/html/_static/css/fonts/lato-normal.woff2 differ diff --git a/sphinx/_build/html/_static/css/theme.css b/sphinx/_build/html/_static/css/theme.css new file mode 100644 index 000000000..19a446a0e --- /dev/null +++ b/sphinx/_build/html/_static/css/theme.css @@ -0,0 +1,4 @@ +html{box-sizing:border-box}*,:after,:before{box-sizing:inherit}article,aside,details,figcaption,figure,footer,header,hgroup,nav,section{display:block}audio,canvas,video{display:inline-block;*display:inline;*zoom:1}[hidden],audio:not([controls]){display:none}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:100%;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}a:active,a:hover{outline:0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:700}blockquote{margin:0}dfn{font-style:italic}ins{background:#ff9;text-decoration:none}ins,mark{color:#000}mark{background:#ff0;font-style:italic;font-weight:700}.rst-content code,.rst-content tt,code,kbd,pre,samp{font-family:monospace,serif;_font-family:courier new,monospace;font-size:1em}pre{white-space:pre}q{quotes:none}q:after,q:before{content:"";content:none}small{font-size:85%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}dl,ol,ul{margin:0;padding:0;list-style:none;list-style-image:none}li{list-style:none}dd{margin:0}img{border:0;-ms-interpolation-mode:bicubic;vertical-align:middle;max-width:100%}svg:not(:root){overflow:hidden}figure,form{margin:0}label{cursor:pointer}button,input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}button,input{line-height:normal}button,input[type=button],input[type=reset],input[type=submit]{cursor:pointer;-webkit-appearance:button;*overflow:visible}button[disabled],input[disabled]{cursor:default}input[type=search]{-webkit-appearance:textfield;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;box-sizing:content-box}textarea{resize:vertical}table{border-collapse:collapse;border-spacing:0}td{vertical-align:top}.chromeframe{margin:.2em 0;background:#ccc;color:#000;padding:.2em 0}.ir{display:block;border:0;text-indent:-999em;overflow:hidden;background-color:transparent;background-repeat:no-repeat;text-align:left;direction:ltr;*line-height:0}.ir br{display:none}.hidden{display:none!important;visibility:hidden}.visuallyhidden{border:0;clip:rect(0 0 0 
0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.visuallyhidden.focusable:active,.visuallyhidden.focusable:focus{clip:auto;height:auto;margin:0;overflow:visible;position:static;width:auto}.invisible{visibility:hidden}.relative{position:relative}big,small{font-size:100%}@media print{body,html,section{background:none!important}*{box-shadow:none!important;text-shadow:none!important;filter:none!important;-ms-filter:none!important}a,a:visited{text-decoration:underline}.ir a:after,a[href^="#"]:after,a[href^="javascript:"]:after{content:""}blockquote,pre{page-break-inside:avoid}thead{display:table-header-group}img,tr{page-break-inside:avoid}img{max-width:100%!important}@page{margin:.5cm}.rst-content .toctree-wrapper>p.caption,h2,h3,p{orphans:3;widows:3}.rst-content .toctree-wrapper>p.caption,h2,h3{page-break-after:avoid}}.btn,.fa:before,.icon:before,.rst-content .admonition,.rst-content .admonition-title:before,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .code-block-caption .headerlink:before,.rst-content .danger,.rst-content .eqno .headerlink:before,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning,.rst-content code.download span:first-child:before,.rst-content dl dt .headerlink:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content p .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content tt.download span:first-child:before,.wy-alert,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-menu-vertical li.current>a button.toctree-expand:before,.wy-menu-vertical li.on a button.toctree-expand:before,.wy-menu-vertical li button.toctree-expand:before,input[type=color],input[type=date],input[type=datetime-local],input[type=datetime],input[type=email],input[type=month],input[type=number],input[type=password],input[type=search],input[type=tel],input[type=text],input[type=time],input[type=url],input[type=week],select,textarea{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}/*! 
+ * Font Awesome 4.7.0 by @davegandy - http://fontawesome.io - @fontawesome + * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) + */@font-face{font-family:FontAwesome;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713);src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix&v=4.7.0) format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#fontawesomeregular) format("svg");font-weight:400;font-style:normal}.fa,.icon,.rst-content .admonition-title,.rst-content .code-block-caption .headerlink,.rst-content .eqno .headerlink,.rst-content code.download span:first-child,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content p .headerlink,.rst-content table>caption .headerlink,.rst-content tt.download span:first-child,.wy-menu-vertical li.current>a button.toctree-expand,.wy-menu-vertical li.on a button.toctree-expand,.wy-menu-vertical li button.toctree-expand{display:inline-block;font:normal normal normal 14px/1 FontAwesome;font-size:inherit;text-rendering:auto;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.fa-lg{font-size:1.33333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.28571em;text-align:center}.fa-ul{padding-left:0;margin-left:2.14286em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.14286em;width:2.14286em;top:.14286em;text-align:center}.fa-li.fa-lg{left:-1.85714em}.fa-border{padding:.2em .25em .15em;border:.08em solid #eee;border-radius:.1em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa-pull-left.icon,.fa.fa-pull-left,.rst-content .code-block-caption .fa-pull-left.headerlink,.rst-content .eqno .fa-pull-left.headerlink,.rst-content .fa-pull-left.admonition-title,.rst-content code.download span.fa-pull-left:first-child,.rst-content dl dt .fa-pull-left.headerlink,.rst-content h1 .fa-pull-left.headerlink,.rst-content h2 .fa-pull-left.headerlink,.rst-content h3 .fa-pull-left.headerlink,.rst-content h4 .fa-pull-left.headerlink,.rst-content h5 .fa-pull-left.headerlink,.rst-content h6 .fa-pull-left.headerlink,.rst-content p .fa-pull-left.headerlink,.rst-content table>caption .fa-pull-left.headerlink,.rst-content tt.download span.fa-pull-left:first-child,.wy-menu-vertical li.current>a button.fa-pull-left.toctree-expand,.wy-menu-vertical li.on a button.fa-pull-left.toctree-expand,.wy-menu-vertical li button.fa-pull-left.toctree-expand{margin-right:.3em}.fa-pull-right.icon,.fa.fa-pull-right,.rst-content .code-block-caption .fa-pull-right.headerlink,.rst-content .eqno .fa-pull-right.headerlink,.rst-content .fa-pull-right.admonition-title,.rst-content code.download span.fa-pull-right:first-child,.rst-content dl dt .fa-pull-right.headerlink,.rst-content h1 .fa-pull-right.headerlink,.rst-content h2 .fa-pull-right.headerlink,.rst-content h3 .fa-pull-right.headerlink,.rst-content h4 .fa-pull-right.headerlink,.rst-content h5 .fa-pull-right.headerlink,.rst-content h6 
.fa-pull-right.headerlink,.rst-content p .fa-pull-right.headerlink,.rst-content table>caption .fa-pull-right.headerlink,.rst-content tt.download span.fa-pull-right:first-child,.wy-menu-vertical li.current>a button.fa-pull-right.toctree-expand,.wy-menu-vertical li.on a button.fa-pull-right.toctree-expand,.wy-menu-vertical li button.fa-pull-right.toctree-expand{margin-left:.3em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left,.pull-left.icon,.rst-content .code-block-caption .pull-left.headerlink,.rst-content .eqno .pull-left.headerlink,.rst-content .pull-left.admonition-title,.rst-content code.download span.pull-left:first-child,.rst-content dl dt .pull-left.headerlink,.rst-content h1 .pull-left.headerlink,.rst-content h2 .pull-left.headerlink,.rst-content h3 .pull-left.headerlink,.rst-content h4 .pull-left.headerlink,.rst-content h5 .pull-left.headerlink,.rst-content h6 .pull-left.headerlink,.rst-content p .pull-left.headerlink,.rst-content table>caption .pull-left.headerlink,.rst-content tt.download span.pull-left:first-child,.wy-menu-vertical li.current>a button.pull-left.toctree-expand,.wy-menu-vertical li.on a button.pull-left.toctree-expand,.wy-menu-vertical li button.pull-left.toctree-expand{margin-right:.3em}.fa.pull-right,.pull-right.icon,.rst-content .code-block-caption .pull-right.headerlink,.rst-content .eqno .pull-right.headerlink,.rst-content .pull-right.admonition-title,.rst-content code.download span.pull-right:first-child,.rst-content dl dt .pull-right.headerlink,.rst-content h1 .pull-right.headerlink,.rst-content h2 .pull-right.headerlink,.rst-content h3 .pull-right.headerlink,.rst-content h4 .pull-right.headerlink,.rst-content h5 .pull-right.headerlink,.rst-content h6 .pull-right.headerlink,.rst-content p .pull-right.headerlink,.rst-content table>caption .pull-right.headerlink,.rst-content tt.download span.pull-right:first-child,.wy-menu-vertical li.current>a button.pull-right.toctree-expand,.wy-menu-vertical li.on a button.pull-right.toctree-expand,.wy-menu-vertical li button.pull-right.toctree-expand{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s linear infinite;animation:fa-spin 2s linear infinite}.fa-pulse{-webkit-animation:fa-spin 1s steps(8) infinite;animation:fa-spin 1s steps(8) infinite}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";-webkit-transform:rotate(90deg);-ms-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";-webkit-transform:rotate(180deg);-ms-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";-webkit-transform:rotate(270deg);-ms-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";-webkit-transform:scaleX(-1);-ms-transform:scaleX(-1);transform:scaleX(-1)}.fa-flip-vertical{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";-webkit-transform:scaleY(-1);-ms-transform:scaleY(-1);transform:scaleY(-1)}:root .fa-flip-horizontal,:root .fa-flip-vertical,:root .fa-rotate-90,:root .fa-rotate-180,:root 
.fa-rotate-270{filter:none}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:""}.fa-music:before{content:""}.fa-search:before,.icon-search:before{content:""}.fa-envelope-o:before{content:""}.fa-heart:before{content:""}.fa-star:before{content:""}.fa-star-o:before{content:""}.fa-user:before{content:""}.fa-film:before{content:""}.fa-th-large:before{content:""}.fa-th:before{content:""}.fa-th-list:before{content:""}.fa-check:before{content:""}.fa-close:before,.fa-remove:before,.fa-times:before{content:""}.fa-search-plus:before{content:""}.fa-search-minus:before{content:""}.fa-power-off:before{content:""}.fa-signal:before{content:""}.fa-cog:before,.fa-gear:before{content:""}.fa-trash-o:before{content:""}.fa-home:before,.icon-home:before{content:""}.fa-file-o:before{content:""}.fa-clock-o:before{content:""}.fa-road:before{content:""}.fa-download:before,.rst-content code.download span:first-child:before,.rst-content tt.download span:first-child:before{content:""}.fa-arrow-circle-o-down:before{content:""}.fa-arrow-circle-o-up:before{content:""}.fa-inbox:before{content:""}.fa-play-circle-o:before{content:""}.fa-repeat:before,.fa-rotate-right:before{content:""}.fa-refresh:before{content:""}.fa-list-alt:before{content:""}.fa-lock:before{content:""}.fa-flag:before{content:""}.fa-headphones:before{content:""}.fa-volume-off:before{content:""}.fa-volume-down:before{content:""}.fa-volume-up:before{content:""}.fa-qrcode:before{content:""}.fa-barcode:before{content:""}.fa-tag:before{content:""}.fa-tags:before{content:""}.fa-book:before,.icon-book:before{content:""}.fa-bookmark:before{content:""}.fa-print:before{content:""}.fa-camera:before{content:""}.fa-font:before{content:""}.fa-bold:before{content:""}.fa-italic:before{content:""}.fa-text-height:before{content:""}.fa-text-width:before{content:""}.fa-align-left:before{content:""}.fa-align-center:before{content:""}.fa-align-right:before{content:""}.fa-align-justify:before{content:""}.fa-list:before{content:""}.fa-dedent:before,.fa-outdent:before{content:""}.fa-indent:before{content:""}.fa-video-camera:before{content:""}.fa-image:before,.fa-photo:before,.fa-picture-o:before{content:""}.fa-pencil:before{content:""}.fa-map-marker:before{content:""}.fa-adjust:before{content:""}.fa-tint:before{content:""}.fa-edit:before,.fa-pencil-square-o:before{content:""}.fa-share-square-o:before{content:""}.fa-check-square-o:before{content:""}.fa-arrows:before{content:""}.fa-step-backward:before{content:""}.fa-fast-backward:before{content:""}.fa-backward:before{content:""}.fa-play:before{content:""}.fa-pause:before{content:""}.fa-stop:before{content:""}.fa-forward:before{content:""}.fa-fast-forward:before{content:""}.fa-step-forward:before{content:""}.fa-eject:before{content:""}.fa-chevron-left:before{content:""}.fa-chevron-right:before{content:""}.fa-plus-circle:before{content:""}.fa-minus-circle:before{content:""}.fa-times-circle:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before{content:""}.fa-check-circle:before,.wy-inline-validate.wy-inline-validate-success 
.wy-input-context:before{content:""}.fa-question-circle:before{content:""}.fa-info-circle:before{content:""}.fa-crosshairs:before{content:""}.fa-times-circle-o:before{content:""}.fa-check-circle-o:before{content:""}.fa-ban:before{content:""}.fa-arrow-left:before{content:""}.fa-arrow-right:before{content:""}.fa-arrow-up:before{content:""}.fa-arrow-down:before{content:""}.fa-mail-forward:before,.fa-share:before{content:""}.fa-expand:before{content:""}.fa-compress:before{content:""}.fa-plus:before{content:""}.fa-minus:before{content:""}.fa-asterisk:before{content:""}.fa-exclamation-circle:before,.rst-content .admonition-title:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before{content:""}.fa-gift:before{content:""}.fa-leaf:before{content:""}.fa-fire:before,.icon-fire:before{content:""}.fa-eye:before{content:""}.fa-eye-slash:before{content:""}.fa-exclamation-triangle:before,.fa-warning:before{content:""}.fa-plane:before{content:""}.fa-calendar:before{content:""}.fa-random:before{content:""}.fa-comment:before{content:""}.fa-magnet:before{content:""}.fa-chevron-up:before{content:""}.fa-chevron-down:before{content:""}.fa-retweet:before{content:""}.fa-shopping-cart:before{content:""}.fa-folder:before{content:""}.fa-folder-open:before{content:""}.fa-arrows-v:before{content:""}.fa-arrows-h:before{content:""}.fa-bar-chart-o:before,.fa-bar-chart:before{content:""}.fa-twitter-square:before{content:""}.fa-facebook-square:before{content:""}.fa-camera-retro:before{content:""}.fa-key:before{content:""}.fa-cogs:before,.fa-gears:before{content:""}.fa-comments:before{content:""}.fa-thumbs-o-up:before{content:""}.fa-thumbs-o-down:before{content:""}.fa-star-half:before{content:""}.fa-heart-o:before{content:""}.fa-sign-out:before{content:""}.fa-linkedin-square:before{content:""}.fa-thumb-tack:before{content:""}.fa-external-link:before{content:""}.fa-sign-in:before{content:""}.fa-trophy:before{content:""}.fa-github-square:before{content:""}.fa-upload:before{content:""}.fa-lemon-o:before{content:""}.fa-phone:before{content:""}.fa-square-o:before{content:""}.fa-bookmark-o:before{content:""}.fa-phone-square:before{content:""}.fa-twitter:before{content:""}.fa-facebook-f:before,.fa-facebook:before{content:""}.fa-github:before,.icon-github:before{content:""}.fa-unlock:before{content:""}.fa-credit-card:before{content:""}.fa-feed:before,.fa-rss:before{content:""}.fa-hdd-o:before{content:""}.fa-bullhorn:before{content:""}.fa-bell:before{content:""}.fa-certificate:before{content:""}.fa-hand-o-right:before{content:""}.fa-hand-o-left:before{content:""}.fa-hand-o-up:before{content:""}.fa-hand-o-down:before{content:""}.fa-arrow-circle-left:before,.icon-circle-arrow-left:before{content:""}.fa-arrow-circle-right:before,.icon-circle-arrow-right:before{content:""}.fa-arrow-circle-up:before{content:""}.fa-arrow-circle-down:before{content:""}.fa-globe:before{content:""}.fa-wrench:before{content:""}.fa-tasks:before{content:""}.fa-filter:before{content:""}.fa-briefcase:before{content:""}.fa-arrows-alt:before{content:""}.fa-group:before,.fa-users:before{content:""}.fa-chain:before,.fa-link:before,.icon-link:before{content:""}.fa-cloud:before{content:""}.fa-flask:before{content:""}.fa-cut:before,.fa-scissors:before{content:""}.fa-copy:before,.fa-files-o:before{content:""}.fa-paperclip:before{content:""}.fa-floppy-o:before,.fa-save:before{content:""}.fa
-square:before{content:""}.fa-bars:before,.fa-navicon:before,.fa-reorder:before{content:""}.fa-list-ul:before{content:""}.fa-list-ol:before{content:""}.fa-strikethrough:before{content:""}.fa-underline:before{content:""}.fa-table:before{content:""}.fa-magic:before{content:""}.fa-truck:before{content:""}.fa-pinterest:before{content:""}.fa-pinterest-square:before{content:""}.fa-google-plus-square:before{content:""}.fa-google-plus:before{content:""}.fa-money:before{content:""}.fa-caret-down:before,.icon-caret-down:before,.wy-dropdown .caret:before{content:""}.fa-caret-up:before{content:""}.fa-caret-left:before{content:""}.fa-caret-right:before{content:""}.fa-columns:before{content:""}.fa-sort:before,.fa-unsorted:before{content:""}.fa-sort-desc:before,.fa-sort-down:before{content:""}.fa-sort-asc:before,.fa-sort-up:before{content:""}.fa-envelope:before{content:""}.fa-linkedin:before{content:""}.fa-rotate-left:before,.fa-undo:before{content:""}.fa-gavel:before,.fa-legal:before{content:""}.fa-dashboard:before,.fa-tachometer:before{content:""}.fa-comment-o:before{content:""}.fa-comments-o:before{content:""}.fa-bolt:before,.fa-flash:before{content:""}.fa-sitemap:before{content:""}.fa-umbrella:before{content:""}.fa-clipboard:before,.fa-paste:before{content:""}.fa-lightbulb-o:before{content:""}.fa-exchange:before{content:""}.fa-cloud-download:before{content:""}.fa-cloud-upload:before{content:""}.fa-user-md:before{content:""}.fa-stethoscope:before{content:""}.fa-suitcase:before{content:""}.fa-bell-o:before{content:""}.fa-coffee:before{content:""}.fa-cutlery:before{content:""}.fa-file-text-o:before{content:""}.fa-building-o:before{content:""}.fa-hospital-o:before{content:""}.fa-ambulance:before{content:""}.fa-medkit:before{content:""}.fa-fighter-jet:before{content:""}.fa-beer:before{content:""}.fa-h-square:before{content:""}.fa-plus-square:before{content:""}.fa-angle-double-left:before{content:""}.fa-angle-double-right:before{content:""}.fa-angle-double-up:before{content:""}.fa-angle-double-down:before{content:""}.fa-angle-left:before{content:""}.fa-angle-right:before{content:""}.fa-angle-up:before{content:""}.fa-angle-down:before{content:""}.fa-desktop:before{content:""}.fa-laptop:before{content:""}.fa-tablet:before{content:""}.fa-mobile-phone:before,.fa-mobile:before{content:""}.fa-circle-o:before{content:""}.fa-quote-left:before{content:""}.fa-quote-right:before{content:""}.fa-spinner:before{content:""}.fa-circle:before{content:""}.fa-mail-reply:before,.fa-reply:before{content:""}.fa-github-alt:before{content:""}.fa-folder-o:before{content:""}.fa-folder-open-o:before{content:""}.fa-smile-o:before{content:""}.fa-frown-o:before{content:""}.fa-meh-o:before{content:""}.fa-gamepad:before{content:""}.fa-keyboard-o:before{content:""}.fa-flag-o:before{content:""}.fa-flag-checkered:before{content:""}.fa-terminal:before{content:""}.fa-code:before{content:""}.fa-mail-reply-all:before,.fa-reply-all:before{content:""}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:""}.fa-location-arrow:before{content:""}.fa-crop:before{content:""}.fa-code-fork:before{content:""}.fa-chain-broken:before,.fa-unlink:before{content:""}.fa-question:before{content:""}.fa-info:before{content:""}.fa-exclamation:before{content:""}.fa-superscript:before{content:""}.fa-subscript:before{content:""}.fa-eraser:before{content:""}.fa-puzzle-piece:before{content:""}.fa-microphone:before{content:""}.fa-microphone-slash:
before{content:""}.fa-shield:before{content:""}.fa-calendar-o:before{content:""}.fa-fire-extinguisher:before{content:""}.fa-rocket:before{content:""}.fa-maxcdn:before{content:""}.fa-chevron-circle-left:before{content:""}.fa-chevron-circle-right:before{content:""}.fa-chevron-circle-up:before{content:""}.fa-chevron-circle-down:before{content:""}.fa-html5:before{content:""}.fa-css3:before{content:""}.fa-anchor:before{content:""}.fa-unlock-alt:before{content:""}.fa-bullseye:before{content:""}.fa-ellipsis-h:before{content:""}.fa-ellipsis-v:before{content:""}.fa-rss-square:before{content:""}.fa-play-circle:before{content:""}.fa-ticket:before{content:""}.fa-minus-square:before{content:""}.fa-minus-square-o:before,.wy-menu-vertical li.current>a button.toctree-expand:before,.wy-menu-vertical li.on a button.toctree-expand:before{content:""}.fa-level-up:before{content:""}.fa-level-down:before{content:""}.fa-check-square:before{content:""}.fa-pencil-square:before{content:""}.fa-external-link-square:before{content:""}.fa-share-square:before{content:""}.fa-compass:before{content:""}.fa-caret-square-o-down:before,.fa-toggle-down:before{content:""}.fa-caret-square-o-up:before,.fa-toggle-up:before{content:""}.fa-caret-square-o-right:before,.fa-toggle-right:before{content:""}.fa-eur:before,.fa-euro:before{content:""}.fa-gbp:before{content:""}.fa-dollar:before,.fa-usd:before{content:""}.fa-inr:before,.fa-rupee:before{content:""}.fa-cny:before,.fa-jpy:before,.fa-rmb:before,.fa-yen:before{content:""}.fa-rouble:before,.fa-rub:before,.fa-ruble:before{content:""}.fa-krw:before,.fa-won:before{content:""}.fa-bitcoin:before,.fa-btc:before{content:""}.fa-file:before{content:""}.fa-file-text:before{content:""}.fa-sort-alpha-asc:before{content:""}.fa-sort-alpha-desc:before{content:""}.fa-sort-amount-asc:before{content:""}.fa-sort-amount-desc:before{content:""}.fa-sort-numeric-asc:before{content:""}.fa-sort-numeric-desc:before{content:""}.fa-thumbs-up:before{content:""}.fa-thumbs-down:before{content:""}.fa-youtube-square:before{content:""}.fa-youtube:before{content:""}.fa-xing:before{content:""}.fa-xing-square:before{content:""}.fa-youtube-play:before{content:""}.fa-dropbox:before{content:""}.fa-stack-overflow:before{content:""}.fa-instagram:before{content:""}.fa-flickr:before{content:""}.fa-adn:before{content:""}.fa-bitbucket:before,.icon-bitbucket:before{content:""}.fa-bitbucket-square:before{content:""}.fa-tumblr:before{content:""}.fa-tumblr-square:before{content:""}.fa-long-arrow-down:before{content:""}.fa-long-arrow-up:before{content:""}.fa-long-arrow-left:before{content:""}.fa-long-arrow-right:before{content:""}.fa-apple:before{content:""}.fa-windows:before{content:""}.fa-android:before{content:""}.fa-linux:before{content:""}.fa-dribbble:before{content:""}.fa-skype:before{content:""}.fa-foursquare:before{content:""}.fa-trello:before{content:""}.fa-female:before{content:""}.fa-male:before{content:""}.fa-gittip:before,.fa-gratipay:before{content:""}.fa-sun-o:before{content:""}.fa-moon-o:before{content:""}.fa-archive:before{content:""}.fa-bug:before{content:""}.fa-vk:before{content:""}.fa-weibo:before{content:""}.fa-renren:before{content:""}.fa-pagelines:before{content:""}.fa-stack-exchange:before{content:""}.fa-arrow-circle-o-right:before{content:""}.fa-arrow-circle-o-left:before{content:""}.fa-caret-square-o-left:before,.fa-toggle-left:before{content:""}.fa-dot-circle-o:before{content:""}.fa-wheelchair:before{content:""}.fa-
vimeo-square:before{content:""}.fa-try:before,.fa-turkish-lira:before{content:""}.fa-plus-square-o:before,.wy-menu-vertical li button.toctree-expand:before{content:""}.fa-space-shuttle:before{content:""}.fa-slack:before{content:""}.fa-envelope-square:before{content:""}.fa-wordpress:before{content:""}.fa-openid:before{content:""}.fa-bank:before,.fa-institution:before,.fa-university:before{content:""}.fa-graduation-cap:before,.fa-mortar-board:before{content:""}.fa-yahoo:before{content:""}.fa-google:before{content:""}.fa-reddit:before{content:""}.fa-reddit-square:before{content:""}.fa-stumbleupon-circle:before{content:""}.fa-stumbleupon:before{content:""}.fa-delicious:before{content:""}.fa-digg:before{content:""}.fa-pied-piper-pp:before{content:""}.fa-pied-piper-alt:before{content:""}.fa-drupal:before{content:""}.fa-joomla:before{content:""}.fa-language:before{content:""}.fa-fax:before{content:""}.fa-building:before{content:""}.fa-child:before{content:""}.fa-paw:before{content:""}.fa-spoon:before{content:""}.fa-cube:before{content:""}.fa-cubes:before{content:""}.fa-behance:before{content:""}.fa-behance-square:before{content:""}.fa-steam:before{content:""}.fa-steam-square:before{content:""}.fa-recycle:before{content:""}.fa-automobile:before,.fa-car:before{content:""}.fa-cab:before,.fa-taxi:before{content:""}.fa-tree:before{content:""}.fa-spotify:before{content:""}.fa-deviantart:before{content:""}.fa-soundcloud:before{content:""}.fa-database:before{content:""}.fa-file-pdf-o:before{content:""}.fa-file-word-o:before{content:""}.fa-file-excel-o:before{content:""}.fa-file-powerpoint-o:before{content:""}.fa-file-image-o:before,.fa-file-photo-o:before,.fa-file-picture-o:before{content:""}.fa-file-archive-o:before,.fa-file-zip-o:before{content:""}.fa-file-audio-o:before,.fa-file-sound-o:before{content:""}.fa-file-movie-o:before,.fa-file-video-o:before{content:""}.fa-file-code-o:before{content:""}.fa-vine:before{content:""}.fa-codepen:before{content:""}.fa-jsfiddle:before{content:""}.fa-life-bouy:before,.fa-life-buoy:before,.fa-life-ring:before,.fa-life-saver:before,.fa-support:before{content:""}.fa-circle-o-notch:before{content:""}.fa-ra:before,.fa-rebel:before,.fa-resistance:before{content:""}.fa-empire:before,.fa-ge:before{content:""}.fa-git-square:before{content:""}.fa-git:before{content:""}.fa-hacker-news:before,.fa-y-combinator-square:before,.fa-yc-square:before{content:""}.fa-tencent-weibo:before{content:""}.fa-qq:before{content:""}.fa-wechat:before,.fa-weixin:before{content:""}.fa-paper-plane:before,.fa-send:before{content:""}.fa-paper-plane-o:before,.fa-send-o:before{content:""}.fa-history:before{content:""}.fa-circle-thin:before{content:""}.fa-header:before{content:""}.fa-paragraph:before{content:""}.fa-sliders:before{content:""}.fa-share-alt:before{content:""}.fa-share-alt-square:before{content:""}.fa-bomb:before{content:""}.fa-futbol-o:before,.fa-soccer-ball-o:before{content:""}.fa-tty:before{content:""}.fa-binoculars:before{content:""}.fa-plug:before{content:""}.fa-slideshare:before{content:""}.fa-twitch:before{content:""}.fa-yelp:before{content:""}.fa-newspaper-o:before{content:""}.fa-wifi:before{content:""}.fa-calculator:before{content:""}.fa-paypal:before{content:""}.fa-google-wallet:before{content:""}.fa-cc-visa:before{content:""}.fa-cc-mastercard:before{content:""}.fa-cc-discover:before{content:""}.fa-cc-amex:before{content:""}.fa-cc-paypal:before{content:""}.fa-cc-stripe:before{content:""}.fa-b
ell-slash:before{content:""}.fa-bell-slash-o:before{content:""}.fa-trash:before{content:""}.fa-copyright:before{content:""}.fa-at:before{content:""}.fa-eyedropper:before{content:""}.fa-paint-brush:before{content:""}.fa-birthday-cake:before{content:""}.fa-area-chart:before{content:""}.fa-pie-chart:before{content:""}.fa-line-chart:before{content:""}.fa-lastfm:before{content:""}.fa-lastfm-square:before{content:""}.fa-toggle-off:before{content:""}.fa-toggle-on:before{content:""}.fa-bicycle:before{content:""}.fa-bus:before{content:""}.fa-ioxhost:before{content:""}.fa-angellist:before{content:""}.fa-cc:before{content:""}.fa-ils:before,.fa-shekel:before,.fa-sheqel:before{content:""}.fa-meanpath:before{content:""}.fa-buysellads:before{content:""}.fa-connectdevelop:before{content:""}.fa-dashcube:before{content:""}.fa-forumbee:before{content:""}.fa-leanpub:before{content:""}.fa-sellsy:before{content:""}.fa-shirtsinbulk:before{content:""}.fa-simplybuilt:before{content:""}.fa-skyatlas:before{content:""}.fa-cart-plus:before{content:""}.fa-cart-arrow-down:before{content:""}.fa-diamond:before{content:""}.fa-ship:before{content:""}.fa-user-secret:before{content:""}.fa-motorcycle:before{content:""}.fa-street-view:before{content:""}.fa-heartbeat:before{content:""}.fa-venus:before{content:""}.fa-mars:before{content:""}.fa-mercury:before{content:""}.fa-intersex:before,.fa-transgender:before{content:""}.fa-transgender-alt:before{content:""}.fa-venus-double:before{content:""}.fa-mars-double:before{content:""}.fa-venus-mars:before{content:""}.fa-mars-stroke:before{content:""}.fa-mars-stroke-v:before{content:""}.fa-mars-stroke-h:before{content:""}.fa-neuter:before{content:""}.fa-genderless:before{content:""}.fa-facebook-official:before{content:""}.fa-pinterest-p:before{content:""}.fa-whatsapp:before{content:""}.fa-server:before{content:""}.fa-user-plus:before{content:""}.fa-user-times:before{content:""}.fa-bed:before,.fa-hotel:before{content:""}.fa-viacoin:before{content:""}.fa-train:before{content:""}.fa-subway:before{content:""}.fa-medium:before{content:""}.fa-y-combinator:before,.fa-yc:before{content:""}.fa-optin-monster:before{content:""}.fa-opencart:before{content:""}.fa-expeditedssl:before{content:""}.fa-battery-4:before,.fa-battery-full:before,.fa-battery:before{content:""}.fa-battery-3:before,.fa-battery-three-quarters:before{content:""}.fa-battery-2:before,.fa-battery-half:before{content:""}.fa-battery-1:before,.fa-battery-quarter:before{content:""}.fa-battery-0:before,.fa-battery-empty:before{content:""}.fa-mouse-pointer:before{content:""}.fa-i-cursor:before{content:""}.fa-object-group:before{content:""}.fa-object-ungroup:before{content:""}.fa-sticky-note:before{content:""}.fa-sticky-note-o:before{content:""}.fa-cc-jcb:before{content:""}.fa-cc-diners-club:before{content:""}.fa-clone:before{content:""}.fa-balance-scale:before{content:""}.fa-hourglass-o:before{content:""}.fa-hourglass-1:before,.fa-hourglass-start:before{content:""}.fa-hourglass-2:before,.fa-hourglass-half:before{content:""}.fa-hourglass-3:before,.fa-hourglass-end:before{content:""}.fa-hourglass:before{content:""}.fa-hand-grab-o:before,.fa-hand-rock-o:before{content:""}.fa-hand-paper-o:before,.fa-hand-stop-o:before{content:""}.fa-hand-scissors-o:before{content:""}.fa-hand-lizard-o:before{content:""}.fa-hand-spock-o:before{content:""}.fa-hand-pointer-o:before{content:""}.fa-hand-peace-o:before{content:""}.fa-trademark:before{content:""}.fa-register
ed:before{content:""}.fa-creative-commons:before{content:""}.fa-gg:before{content:""}.fa-gg-circle:before{content:""}.fa-tripadvisor:before{content:""}.fa-odnoklassniki:before{content:""}.fa-odnoklassniki-square:before{content:""}.fa-get-pocket:before{content:""}.fa-wikipedia-w:before{content:""}.fa-safari:before{content:""}.fa-chrome:before{content:""}.fa-firefox:before{content:""}.fa-opera:before{content:""}.fa-internet-explorer:before{content:""}.fa-television:before,.fa-tv:before{content:""}.fa-contao:before{content:""}.fa-500px:before{content:""}.fa-amazon:before{content:""}.fa-calendar-plus-o:before{content:""}.fa-calendar-minus-o:before{content:""}.fa-calendar-times-o:before{content:""}.fa-calendar-check-o:before{content:""}.fa-industry:before{content:""}.fa-map-pin:before{content:""}.fa-map-signs:before{content:""}.fa-map-o:before{content:""}.fa-map:before{content:""}.fa-commenting:before{content:""}.fa-commenting-o:before{content:""}.fa-houzz:before{content:""}.fa-vimeo:before{content:""}.fa-black-tie:before{content:""}.fa-fonticons:before{content:""}.fa-reddit-alien:before{content:""}.fa-edge:before{content:""}.fa-credit-card-alt:before{content:""}.fa-codiepie:before{content:""}.fa-modx:before{content:""}.fa-fort-awesome:before{content:""}.fa-usb:before{content:""}.fa-product-hunt:before{content:""}.fa-mixcloud:before{content:""}.fa-scribd:before{content:""}.fa-pause-circle:before{content:""}.fa-pause-circle-o:before{content:""}.fa-stop-circle:before{content:""}.fa-stop-circle-o:before{content:""}.fa-shopping-bag:before{content:""}.fa-shopping-basket:before{content:""}.fa-hashtag:before{content:""}.fa-bluetooth:before{content:""}.fa-bluetooth-b:before{content:""}.fa-percent:before{content:""}.fa-gitlab:before,.icon-gitlab:before{content:""}.fa-wpbeginner:before{content:""}.fa-wpforms:before{content:""}.fa-envira:before{content:""}.fa-universal-access:before{content:""}.fa-wheelchair-alt:before{content:""}.fa-question-circle-o:before{content:""}.fa-blind:before{content:""}.fa-audio-description:before{content:""}.fa-volume-control-phone:before{content:""}.fa-braille:before{content:""}.fa-assistive-listening-systems:before{content:""}.fa-american-sign-language-interpreting:before,.fa-asl-interpreting:before{content:""}.fa-deaf:before,.fa-deafness:before,.fa-hard-of-hearing:before{content:""}.fa-glide:before{content:""}.fa-glide-g:before{content:""}.fa-sign-language:before,.fa-signing:before{content:""}.fa-low-vision:before{content:""}.fa-viadeo:before{content:""}.fa-viadeo-square:before{content:""}.fa-snapchat:before{content:""}.fa-snapchat-ghost:before{content:""}.fa-snapchat-square:before{content:""}.fa-pied-piper:before{content:""}.fa-first-order:before{content:""}.fa-yoast:before{content:""}.fa-themeisle:before{content:""}.fa-google-plus-circle:before,.fa-google-plus-official:before{content:""}.fa-fa:before,.fa-font-awesome:before{content:""}.fa-handshake-o:before{content:""}.fa-envelope-open:before{content:""}.fa-envelope-open-o:before{content:""}.fa-linode:before{content:""}.fa-address-book:before{content:""}.fa-address-book-o:before{content:""}.fa-address-card:before,.fa-vcard:before{content:""}.fa-address-card-o:before,.fa-vcard-o:before{content:""}.fa-user-circle:before{content:""}.fa-user-circle-o:before{content:""}.fa-user-o:before{content:""}.fa-id-badge:before{content:""}.fa-drivers-license:before,.fa-id-card:before{content:""}.fa-drivers-license-o:before,.fa-id-card-o:before{c
ontent:""}.fa-quora:before{content:""}.fa-free-code-camp:before{content:""}.fa-telegram:before{content:""}.fa-thermometer-4:before,.fa-thermometer-full:before,.fa-thermometer:before{content:""}.fa-thermometer-3:before,.fa-thermometer-three-quarters:before{content:""}.fa-thermometer-2:before,.fa-thermometer-half:before{content:""}.fa-thermometer-1:before,.fa-thermometer-quarter:before{content:""}.fa-thermometer-0:before,.fa-thermometer-empty:before{content:""}.fa-shower:before{content:""}.fa-bath:before,.fa-bathtub:before,.fa-s15:before{content:""}.fa-podcast:before{content:""}.fa-window-maximize:before{content:""}.fa-window-minimize:before{content:""}.fa-window-restore:before{content:""}.fa-times-rectangle:before,.fa-window-close:before{content:""}.fa-times-rectangle-o:before,.fa-window-close-o:before{content:""}.fa-bandcamp:before{content:""}.fa-grav:before{content:""}.fa-etsy:before{content:""}.fa-imdb:before{content:""}.fa-ravelry:before{content:""}.fa-eercast:before{content:""}.fa-microchip:before{content:""}.fa-snowflake-o:before{content:""}.fa-superpowers:before{content:""}.fa-wpexplorer:before{content:""}.fa-meetup:before{content:""}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}.fa,.icon,.rst-content .admonition-title,.rst-content .code-block-caption .headerlink,.rst-content .eqno .headerlink,.rst-content code.download span:first-child,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content p .headerlink,.rst-content table>caption .headerlink,.rst-content tt.download span:first-child,.wy-dropdown .caret,.wy-inline-validate.wy-inline-validate-danger .wy-input-context,.wy-inline-validate.wy-inline-validate-info .wy-input-context,.wy-inline-validate.wy-inline-validate-success .wy-input-context,.wy-inline-validate.wy-inline-validate-warning .wy-input-context,.wy-menu-vertical li.current>a button.toctree-expand,.wy-menu-vertical li.on a button.toctree-expand,.wy-menu-vertical li button.toctree-expand{font-family:inherit}.fa:before,.icon:before,.rst-content .admonition-title:before,.rst-content .code-block-caption .headerlink:before,.rst-content .eqno .headerlink:before,.rst-content code.download span:first-child:before,.rst-content dl dt .headerlink:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content p .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content tt.download span:first-child:before,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-menu-vertical li.current>a button.toctree-expand:before,.wy-menu-vertical li.on a button.toctree-expand:before,.wy-menu-vertical li 
button.toctree-expand:before{font-family:FontAwesome;display:inline-block;font-style:normal;font-weight:400;line-height:1;text-decoration:inherit}.rst-content .code-block-caption a .headerlink,.rst-content .eqno a .headerlink,.rst-content a .admonition-title,.rst-content code.download a span:first-child,.rst-content dl dt a .headerlink,.rst-content h1 a .headerlink,.rst-content h2 a .headerlink,.rst-content h3 a .headerlink,.rst-content h4 a .headerlink,.rst-content h5 a .headerlink,.rst-content h6 a .headerlink,.rst-content p.caption a .headerlink,.rst-content p a .headerlink,.rst-content table>caption a .headerlink,.rst-content tt.download a span:first-child,.wy-menu-vertical li.current>a button.toctree-expand,.wy-menu-vertical li.on a button.toctree-expand,.wy-menu-vertical li a button.toctree-expand,a .fa,a .icon,a .rst-content .admonition-title,a .rst-content .code-block-caption .headerlink,a .rst-content .eqno .headerlink,a .rst-content code.download span:first-child,a .rst-content dl dt .headerlink,a .rst-content h1 .headerlink,a .rst-content h2 .headerlink,a .rst-content h3 .headerlink,a .rst-content h4 .headerlink,a .rst-content h5 .headerlink,a .rst-content h6 .headerlink,a .rst-content p.caption .headerlink,a .rst-content p .headerlink,a .rst-content table>caption .headerlink,a .rst-content tt.download span:first-child,a .wy-menu-vertical li button.toctree-expand{display:inline-block;text-decoration:inherit}.btn .fa,.btn .icon,.btn .rst-content .admonition-title,.btn .rst-content .code-block-caption .headerlink,.btn .rst-content .eqno .headerlink,.btn .rst-content code.download span:first-child,.btn .rst-content dl dt .headerlink,.btn .rst-content h1 .headerlink,.btn .rst-content h2 .headerlink,.btn .rst-content h3 .headerlink,.btn .rst-content h4 .headerlink,.btn .rst-content h5 .headerlink,.btn .rst-content h6 .headerlink,.btn .rst-content p .headerlink,.btn .rst-content table>caption .headerlink,.btn .rst-content tt.download span:first-child,.btn .wy-menu-vertical li.current>a button.toctree-expand,.btn .wy-menu-vertical li.on a button.toctree-expand,.btn .wy-menu-vertical li button.toctree-expand,.nav .fa,.nav .icon,.nav .rst-content .admonition-title,.nav .rst-content .code-block-caption .headerlink,.nav .rst-content .eqno .headerlink,.nav .rst-content code.download span:first-child,.nav .rst-content dl dt .headerlink,.nav .rst-content h1 .headerlink,.nav .rst-content h2 .headerlink,.nav .rst-content h3 .headerlink,.nav .rst-content h4 .headerlink,.nav .rst-content h5 .headerlink,.nav .rst-content h6 .headerlink,.nav .rst-content p .headerlink,.nav .rst-content table>caption .headerlink,.nav .rst-content tt.download span:first-child,.nav .wy-menu-vertical li.current>a button.toctree-expand,.nav .wy-menu-vertical li.on a button.toctree-expand,.nav .wy-menu-vertical li button.toctree-expand,.rst-content .btn .admonition-title,.rst-content .code-block-caption .btn .headerlink,.rst-content .code-block-caption .nav .headerlink,.rst-content .eqno .btn .headerlink,.rst-content .eqno .nav .headerlink,.rst-content .nav .admonition-title,.rst-content code.download .btn span:first-child,.rst-content code.download .nav span:first-child,.rst-content dl dt .btn .headerlink,.rst-content dl dt .nav .headerlink,.rst-content h1 .btn .headerlink,.rst-content h1 .nav .headerlink,.rst-content h2 .btn .headerlink,.rst-content h2 .nav .headerlink,.rst-content h3 .btn .headerlink,.rst-content h3 .nav .headerlink,.rst-content h4 .btn .headerlink,.rst-content h4 .nav .headerlink,.rst-content h5 .btn 
.headerlink,.rst-content h5 .nav .headerlink,.rst-content h6 .btn .headerlink,.rst-content h6 .nav .headerlink,.rst-content p .btn .headerlink,.rst-content p .nav .headerlink,.rst-content table>caption .btn .headerlink,.rst-content table>caption .nav .headerlink,.rst-content tt.download .btn span:first-child,.rst-content tt.download .nav span:first-child,.wy-menu-vertical li .btn button.toctree-expand,.wy-menu-vertical li.current>a .btn button.toctree-expand,.wy-menu-vertical li.current>a .nav button.toctree-expand,.wy-menu-vertical li .nav button.toctree-expand,.wy-menu-vertical li.on a .btn button.toctree-expand,.wy-menu-vertical li.on a .nav button.toctree-expand{display:inline}.btn .fa-large.icon,.btn .fa.fa-large,.btn .rst-content .code-block-caption .fa-large.headerlink,.btn .rst-content .eqno .fa-large.headerlink,.btn .rst-content .fa-large.admonition-title,.btn .rst-content code.download span.fa-large:first-child,.btn .rst-content dl dt .fa-large.headerlink,.btn .rst-content h1 .fa-large.headerlink,.btn .rst-content h2 .fa-large.headerlink,.btn .rst-content h3 .fa-large.headerlink,.btn .rst-content h4 .fa-large.headerlink,.btn .rst-content h5 .fa-large.headerlink,.btn .rst-content h6 .fa-large.headerlink,.btn .rst-content p .fa-large.headerlink,.btn .rst-content table>caption .fa-large.headerlink,.btn .rst-content tt.download span.fa-large:first-child,.btn .wy-menu-vertical li button.fa-large.toctree-expand,.nav .fa-large.icon,.nav .fa.fa-large,.nav .rst-content .code-block-caption .fa-large.headerlink,.nav .rst-content .eqno .fa-large.headerlink,.nav .rst-content .fa-large.admonition-title,.nav .rst-content code.download span.fa-large:first-child,.nav .rst-content dl dt .fa-large.headerlink,.nav .rst-content h1 .fa-large.headerlink,.nav .rst-content h2 .fa-large.headerlink,.nav .rst-content h3 .fa-large.headerlink,.nav .rst-content h4 .fa-large.headerlink,.nav .rst-content h5 .fa-large.headerlink,.nav .rst-content h6 .fa-large.headerlink,.nav .rst-content p .fa-large.headerlink,.nav .rst-content table>caption .fa-large.headerlink,.nav .rst-content tt.download span.fa-large:first-child,.nav .wy-menu-vertical li button.fa-large.toctree-expand,.rst-content .btn .fa-large.admonition-title,.rst-content .code-block-caption .btn .fa-large.headerlink,.rst-content .code-block-caption .nav .fa-large.headerlink,.rst-content .eqno .btn .fa-large.headerlink,.rst-content .eqno .nav .fa-large.headerlink,.rst-content .nav .fa-large.admonition-title,.rst-content code.download .btn span.fa-large:first-child,.rst-content code.download .nav span.fa-large:first-child,.rst-content dl dt .btn .fa-large.headerlink,.rst-content dl dt .nav .fa-large.headerlink,.rst-content h1 .btn .fa-large.headerlink,.rst-content h1 .nav .fa-large.headerlink,.rst-content h2 .btn .fa-large.headerlink,.rst-content h2 .nav .fa-large.headerlink,.rst-content h3 .btn .fa-large.headerlink,.rst-content h3 .nav .fa-large.headerlink,.rst-content h4 .btn .fa-large.headerlink,.rst-content h4 .nav .fa-large.headerlink,.rst-content h5 .btn .fa-large.headerlink,.rst-content h5 .nav .fa-large.headerlink,.rst-content h6 .btn .fa-large.headerlink,.rst-content h6 .nav .fa-large.headerlink,.rst-content p .btn .fa-large.headerlink,.rst-content p .nav .fa-large.headerlink,.rst-content table>caption .btn .fa-large.headerlink,.rst-content table>caption .nav .fa-large.headerlink,.rst-content tt.download .btn span.fa-large:first-child,.rst-content tt.download .nav span.fa-large:first-child,.wy-menu-vertical li .btn 
button.fa-large.toctree-expand,.wy-menu-vertical li .nav button.fa-large.toctree-expand{line-height:.9em}.btn .fa-spin.icon,.btn .fa.fa-spin,.btn .rst-content .code-block-caption .fa-spin.headerlink,.btn .rst-content .eqno .fa-spin.headerlink,.btn .rst-content .fa-spin.admonition-title,.btn .rst-content code.download span.fa-spin:first-child,.btn .rst-content dl dt .fa-spin.headerlink,.btn .rst-content h1 .fa-spin.headerlink,.btn .rst-content h2 .fa-spin.headerlink,.btn .rst-content h3 .fa-spin.headerlink,.btn .rst-content h4 .fa-spin.headerlink,.btn .rst-content h5 .fa-spin.headerlink,.btn .rst-content h6 .fa-spin.headerlink,.btn .rst-content p .fa-spin.headerlink,.btn .rst-content table>caption .fa-spin.headerlink,.btn .rst-content tt.download span.fa-spin:first-child,.btn .wy-menu-vertical li button.fa-spin.toctree-expand,.nav .fa-spin.icon,.nav .fa.fa-spin,.nav .rst-content .code-block-caption .fa-spin.headerlink,.nav .rst-content .eqno .fa-spin.headerlink,.nav .rst-content .fa-spin.admonition-title,.nav .rst-content code.download span.fa-spin:first-child,.nav .rst-content dl dt .fa-spin.headerlink,.nav .rst-content h1 .fa-spin.headerlink,.nav .rst-content h2 .fa-spin.headerlink,.nav .rst-content h3 .fa-spin.headerlink,.nav .rst-content h4 .fa-spin.headerlink,.nav .rst-content h5 .fa-spin.headerlink,.nav .rst-content h6 .fa-spin.headerlink,.nav .rst-content p .fa-spin.headerlink,.nav .rst-content table>caption .fa-spin.headerlink,.nav .rst-content tt.download span.fa-spin:first-child,.nav .wy-menu-vertical li button.fa-spin.toctree-expand,.rst-content .btn .fa-spin.admonition-title,.rst-content .code-block-caption .btn .fa-spin.headerlink,.rst-content .code-block-caption .nav .fa-spin.headerlink,.rst-content .eqno .btn .fa-spin.headerlink,.rst-content .eqno .nav .fa-spin.headerlink,.rst-content .nav .fa-spin.admonition-title,.rst-content code.download .btn span.fa-spin:first-child,.rst-content code.download .nav span.fa-spin:first-child,.rst-content dl dt .btn .fa-spin.headerlink,.rst-content dl dt .nav .fa-spin.headerlink,.rst-content h1 .btn .fa-spin.headerlink,.rst-content h1 .nav .fa-spin.headerlink,.rst-content h2 .btn .fa-spin.headerlink,.rst-content h2 .nav .fa-spin.headerlink,.rst-content h3 .btn .fa-spin.headerlink,.rst-content h3 .nav .fa-spin.headerlink,.rst-content h4 .btn .fa-spin.headerlink,.rst-content h4 .nav .fa-spin.headerlink,.rst-content h5 .btn .fa-spin.headerlink,.rst-content h5 .nav .fa-spin.headerlink,.rst-content h6 .btn .fa-spin.headerlink,.rst-content h6 .nav .fa-spin.headerlink,.rst-content p .btn .fa-spin.headerlink,.rst-content p .nav .fa-spin.headerlink,.rst-content table>caption .btn .fa-spin.headerlink,.rst-content table>caption .nav .fa-spin.headerlink,.rst-content tt.download .btn span.fa-spin:first-child,.rst-content tt.download .nav span.fa-spin:first-child,.wy-menu-vertical li .btn button.fa-spin.toctree-expand,.wy-menu-vertical li .nav button.fa-spin.toctree-expand{display:inline-block}.btn.fa:before,.btn.icon:before,.rst-content .btn.admonition-title:before,.rst-content .code-block-caption .btn.headerlink:before,.rst-content .eqno .btn.headerlink:before,.rst-content code.download span.btn:first-child:before,.rst-content dl dt .btn.headerlink:before,.rst-content h1 .btn.headerlink:before,.rst-content h2 .btn.headerlink:before,.rst-content h3 .btn.headerlink:before,.rst-content h4 .btn.headerlink:before,.rst-content h5 .btn.headerlink:before,.rst-content h6 .btn.headerlink:before,.rst-content p .btn.headerlink:before,.rst-content table>caption 
.btn.headerlink:before,.rst-content tt.download span.btn:first-child:before,.wy-menu-vertical li button.btn.toctree-expand:before{opacity:.5;-webkit-transition:opacity .05s ease-in;-moz-transition:opacity .05s ease-in;transition:opacity .05s ease-in}.btn.fa:hover:before,.btn.icon:hover:before,.rst-content .btn.admonition-title:hover:before,.rst-content .code-block-caption .btn.headerlink:hover:before,.rst-content .eqno .btn.headerlink:hover:before,.rst-content code.download span.btn:first-child:hover:before,.rst-content dl dt .btn.headerlink:hover:before,.rst-content h1 .btn.headerlink:hover:before,.rst-content h2 .btn.headerlink:hover:before,.rst-content h3 .btn.headerlink:hover:before,.rst-content h4 .btn.headerlink:hover:before,.rst-content h5 .btn.headerlink:hover:before,.rst-content h6 .btn.headerlink:hover:before,.rst-content p .btn.headerlink:hover:before,.rst-content table>caption .btn.headerlink:hover:before,.rst-content tt.download span.btn:first-child:hover:before,.wy-menu-vertical li button.btn.toctree-expand:hover:before{opacity:1}.btn-mini .fa:before,.btn-mini .icon:before,.btn-mini .rst-content .admonition-title:before,.btn-mini .rst-content .code-block-caption .headerlink:before,.btn-mini .rst-content .eqno .headerlink:before,.btn-mini .rst-content code.download span:first-child:before,.btn-mini .rst-content dl dt .headerlink:before,.btn-mini .rst-content h1 .headerlink:before,.btn-mini .rst-content h2 .headerlink:before,.btn-mini .rst-content h3 .headerlink:before,.btn-mini .rst-content h4 .headerlink:before,.btn-mini .rst-content h5 .headerlink:before,.btn-mini .rst-content h6 .headerlink:before,.btn-mini .rst-content p .headerlink:before,.btn-mini .rst-content table>caption .headerlink:before,.btn-mini .rst-content tt.download span:first-child:before,.btn-mini .wy-menu-vertical li button.toctree-expand:before,.rst-content .btn-mini .admonition-title:before,.rst-content .code-block-caption .btn-mini .headerlink:before,.rst-content .eqno .btn-mini .headerlink:before,.rst-content code.download .btn-mini span:first-child:before,.rst-content dl dt .btn-mini .headerlink:before,.rst-content h1 .btn-mini .headerlink:before,.rst-content h2 .btn-mini .headerlink:before,.rst-content h3 .btn-mini .headerlink:before,.rst-content h4 .btn-mini .headerlink:before,.rst-content h5 .btn-mini .headerlink:before,.rst-content h6 .btn-mini .headerlink:before,.rst-content p .btn-mini .headerlink:before,.rst-content table>caption .btn-mini .headerlink:before,.rst-content tt.download .btn-mini span:first-child:before,.wy-menu-vertical li .btn-mini button.toctree-expand:before{font-size:14px;vertical-align:-15%}.rst-content .admonition,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning,.wy-alert{padding:12px;line-height:24px;margin-bottom:24px;background:#e7f2fa}.rst-content .admonition-title,.wy-alert-title{font-weight:700;display:block;color:#fff;background:#6ab0de;padding:6px 12px;margin:-12px -12px 12px}.rst-content .danger,.rst-content .error,.rst-content .wy-alert-danger.admonition,.rst-content .wy-alert-danger.admonition-todo,.rst-content .wy-alert-danger.attention,.rst-content .wy-alert-danger.caution,.rst-content .wy-alert-danger.hint,.rst-content .wy-alert-danger.important,.rst-content .wy-alert-danger.note,.rst-content .wy-alert-danger.seealso,.rst-content .wy-alert-danger.tip,.rst-content 
.wy-alert-danger.warning,.wy-alert.wy-alert-danger{background:#fdf3f2}.rst-content .danger .admonition-title,.rst-content .danger .wy-alert-title,.rst-content .error .admonition-title,.rst-content .error .wy-alert-title,.rst-content .wy-alert-danger.admonition-todo .admonition-title,.rst-content .wy-alert-danger.admonition-todo .wy-alert-title,.rst-content .wy-alert-danger.admonition .admonition-title,.rst-content .wy-alert-danger.admonition .wy-alert-title,.rst-content .wy-alert-danger.attention .admonition-title,.rst-content .wy-alert-danger.attention .wy-alert-title,.rst-content .wy-alert-danger.caution .admonition-title,.rst-content .wy-alert-danger.caution .wy-alert-title,.rst-content .wy-alert-danger.hint .admonition-title,.rst-content .wy-alert-danger.hint .wy-alert-title,.rst-content .wy-alert-danger.important .admonition-title,.rst-content .wy-alert-danger.important .wy-alert-title,.rst-content .wy-alert-danger.note .admonition-title,.rst-content .wy-alert-danger.note .wy-alert-title,.rst-content .wy-alert-danger.seealso .admonition-title,.rst-content .wy-alert-danger.seealso .wy-alert-title,.rst-content .wy-alert-danger.tip .admonition-title,.rst-content .wy-alert-danger.tip .wy-alert-title,.rst-content .wy-alert-danger.warning .admonition-title,.rst-content .wy-alert-danger.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-danger .admonition-title,.wy-alert.wy-alert-danger .rst-content .admonition-title,.wy-alert.wy-alert-danger .wy-alert-title{background:#f29f97}.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .warning,.rst-content .wy-alert-warning.admonition,.rst-content .wy-alert-warning.danger,.rst-content .wy-alert-warning.error,.rst-content .wy-alert-warning.hint,.rst-content .wy-alert-warning.important,.rst-content .wy-alert-warning.note,.rst-content .wy-alert-warning.seealso,.rst-content .wy-alert-warning.tip,.wy-alert.wy-alert-warning{background:#ffedcc}.rst-content .admonition-todo .admonition-title,.rst-content .admonition-todo .wy-alert-title,.rst-content .attention .admonition-title,.rst-content .attention .wy-alert-title,.rst-content .caution .admonition-title,.rst-content .caution .wy-alert-title,.rst-content .warning .admonition-title,.rst-content .warning .wy-alert-title,.rst-content .wy-alert-warning.admonition .admonition-title,.rst-content .wy-alert-warning.admonition .wy-alert-title,.rst-content .wy-alert-warning.danger .admonition-title,.rst-content .wy-alert-warning.danger .wy-alert-title,.rst-content .wy-alert-warning.error .admonition-title,.rst-content .wy-alert-warning.error .wy-alert-title,.rst-content .wy-alert-warning.hint .admonition-title,.rst-content .wy-alert-warning.hint .wy-alert-title,.rst-content .wy-alert-warning.important .admonition-title,.rst-content .wy-alert-warning.important .wy-alert-title,.rst-content .wy-alert-warning.note .admonition-title,.rst-content .wy-alert-warning.note .wy-alert-title,.rst-content .wy-alert-warning.seealso .admonition-title,.rst-content .wy-alert-warning.seealso .wy-alert-title,.rst-content .wy-alert-warning.tip .admonition-title,.rst-content .wy-alert-warning.tip .wy-alert-title,.rst-content .wy-alert.wy-alert-warning .admonition-title,.wy-alert.wy-alert-warning .rst-content .admonition-title,.wy-alert.wy-alert-warning .wy-alert-title{background:#f0b37e}.rst-content .note,.rst-content .seealso,.rst-content .wy-alert-info.admonition,.rst-content .wy-alert-info.admonition-todo,.rst-content .wy-alert-info.attention,.rst-content .wy-alert-info.caution,.rst-content 
.wy-alert-info.danger,.rst-content .wy-alert-info.error,.rst-content .wy-alert-info.hint,.rst-content .wy-alert-info.important,.rst-content .wy-alert-info.tip,.rst-content .wy-alert-info.warning,.wy-alert.wy-alert-info{background:#e7f2fa}.rst-content .note .admonition-title,.rst-content .note .wy-alert-title,.rst-content .seealso .admonition-title,.rst-content .seealso .wy-alert-title,.rst-content .wy-alert-info.admonition-todo .admonition-title,.rst-content .wy-alert-info.admonition-todo .wy-alert-title,.rst-content .wy-alert-info.admonition .admonition-title,.rst-content .wy-alert-info.admonition .wy-alert-title,.rst-content .wy-alert-info.attention .admonition-title,.rst-content .wy-alert-info.attention .wy-alert-title,.rst-content .wy-alert-info.caution .admonition-title,.rst-content .wy-alert-info.caution .wy-alert-title,.rst-content .wy-alert-info.danger .admonition-title,.rst-content .wy-alert-info.danger .wy-alert-title,.rst-content .wy-alert-info.error .admonition-title,.rst-content .wy-alert-info.error .wy-alert-title,.rst-content .wy-alert-info.hint .admonition-title,.rst-content .wy-alert-info.hint .wy-alert-title,.rst-content .wy-alert-info.important .admonition-title,.rst-content .wy-alert-info.important .wy-alert-title,.rst-content .wy-alert-info.tip .admonition-title,.rst-content .wy-alert-info.tip .wy-alert-title,.rst-content .wy-alert-info.warning .admonition-title,.rst-content .wy-alert-info.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-info .admonition-title,.wy-alert.wy-alert-info .rst-content .admonition-title,.wy-alert.wy-alert-info .wy-alert-title{background:#6ab0de}.rst-content .hint,.rst-content .important,.rst-content .tip,.rst-content .wy-alert-success.admonition,.rst-content .wy-alert-success.admonition-todo,.rst-content .wy-alert-success.attention,.rst-content .wy-alert-success.caution,.rst-content .wy-alert-success.danger,.rst-content .wy-alert-success.error,.rst-content .wy-alert-success.note,.rst-content .wy-alert-success.seealso,.rst-content .wy-alert-success.warning,.wy-alert.wy-alert-success{background:#dbfaf4}.rst-content .hint .admonition-title,.rst-content .hint .wy-alert-title,.rst-content .important .admonition-title,.rst-content .important .wy-alert-title,.rst-content .tip .admonition-title,.rst-content .tip .wy-alert-title,.rst-content .wy-alert-success.admonition-todo .admonition-title,.rst-content .wy-alert-success.admonition-todo .wy-alert-title,.rst-content .wy-alert-success.admonition .admonition-title,.rst-content .wy-alert-success.admonition .wy-alert-title,.rst-content .wy-alert-success.attention .admonition-title,.rst-content .wy-alert-success.attention .wy-alert-title,.rst-content .wy-alert-success.caution .admonition-title,.rst-content .wy-alert-success.caution .wy-alert-title,.rst-content .wy-alert-success.danger .admonition-title,.rst-content .wy-alert-success.danger .wy-alert-title,.rst-content .wy-alert-success.error .admonition-title,.rst-content .wy-alert-success.error .wy-alert-title,.rst-content .wy-alert-success.note .admonition-title,.rst-content .wy-alert-success.note .wy-alert-title,.rst-content .wy-alert-success.seealso .admonition-title,.rst-content .wy-alert-success.seealso .wy-alert-title,.rst-content .wy-alert-success.warning .admonition-title,.rst-content .wy-alert-success.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-success .admonition-title,.wy-alert.wy-alert-success .rst-content .admonition-title,.wy-alert.wy-alert-success .wy-alert-title{background:#1abc9c}.rst-content 
.wy-alert-neutral.admonition,.rst-content .wy-alert-neutral.admonition-todo,.rst-content .wy-alert-neutral.attention,.rst-content .wy-alert-neutral.caution,.rst-content .wy-alert-neutral.danger,.rst-content .wy-alert-neutral.error,.rst-content .wy-alert-neutral.hint,.rst-content .wy-alert-neutral.important,.rst-content .wy-alert-neutral.note,.rst-content .wy-alert-neutral.seealso,.rst-content .wy-alert-neutral.tip,.rst-content .wy-alert-neutral.warning,.wy-alert.wy-alert-neutral{background:#f3f6f6}.rst-content .wy-alert-neutral.admonition-todo .admonition-title,.rst-content .wy-alert-neutral.admonition-todo .wy-alert-title,.rst-content .wy-alert-neutral.admonition .admonition-title,.rst-content .wy-alert-neutral.admonition .wy-alert-title,.rst-content .wy-alert-neutral.attention .admonition-title,.rst-content .wy-alert-neutral.attention .wy-alert-title,.rst-content .wy-alert-neutral.caution .admonition-title,.rst-content .wy-alert-neutral.caution .wy-alert-title,.rst-content .wy-alert-neutral.danger .admonition-title,.rst-content .wy-alert-neutral.danger .wy-alert-title,.rst-content .wy-alert-neutral.error .admonition-title,.rst-content .wy-alert-neutral.error .wy-alert-title,.rst-content .wy-alert-neutral.hint .admonition-title,.rst-content .wy-alert-neutral.hint .wy-alert-title,.rst-content .wy-alert-neutral.important .admonition-title,.rst-content .wy-alert-neutral.important .wy-alert-title,.rst-content .wy-alert-neutral.note .admonition-title,.rst-content .wy-alert-neutral.note .wy-alert-title,.rst-content .wy-alert-neutral.seealso .admonition-title,.rst-content .wy-alert-neutral.seealso .wy-alert-title,.rst-content .wy-alert-neutral.tip .admonition-title,.rst-content .wy-alert-neutral.tip .wy-alert-title,.rst-content .wy-alert-neutral.warning .admonition-title,.rst-content .wy-alert-neutral.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-neutral .admonition-title,.wy-alert.wy-alert-neutral .rst-content .admonition-title,.wy-alert.wy-alert-neutral .wy-alert-title{color:#404040;background:#e1e4e5}.rst-content .wy-alert-neutral.admonition-todo a,.rst-content .wy-alert-neutral.admonition a,.rst-content .wy-alert-neutral.attention a,.rst-content .wy-alert-neutral.caution a,.rst-content .wy-alert-neutral.danger a,.rst-content .wy-alert-neutral.error a,.rst-content .wy-alert-neutral.hint a,.rst-content .wy-alert-neutral.important a,.rst-content .wy-alert-neutral.note a,.rst-content .wy-alert-neutral.seealso a,.rst-content .wy-alert-neutral.tip a,.rst-content .wy-alert-neutral.warning a,.wy-alert.wy-alert-neutral a{color:#2980b9}.rst-content .admonition-todo p:last-child,.rst-content .admonition p:last-child,.rst-content .attention p:last-child,.rst-content .caution p:last-child,.rst-content .danger p:last-child,.rst-content .error p:last-child,.rst-content .hint p:last-child,.rst-content .important p:last-child,.rst-content .note p:last-child,.rst-content .seealso p:last-child,.rst-content .tip p:last-child,.rst-content .warning p:last-child,.wy-alert p:last-child{margin-bottom:0}.wy-tray-container{position:fixed;bottom:0;left:0;z-index:600}.wy-tray-container li{display:block;width:300px;background:transparent;color:#fff;text-align:center;box-shadow:0 5px 5px 0 rgba(0,0,0,.1);padding:0 24px;min-width:20%;opacity:0;height:0;line-height:56px;overflow:hidden;-webkit-transition:all .3s ease-in;-moz-transition:all .3s ease-in;transition:all .3s ease-in}.wy-tray-container li.wy-tray-item-success{background:#27ae60}.wy-tray-container 
li.wy-tray-item-info{background:#2980b9}.wy-tray-container li.wy-tray-item-warning{background:#e67e22}.wy-tray-container li.wy-tray-item-danger{background:#e74c3c}.wy-tray-container li.on{opacity:1;height:56px}@media screen and (max-width:768px){.wy-tray-container{bottom:auto;top:0;width:100%}.wy-tray-container li{width:100%}}button{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle;cursor:pointer;line-height:normal;-webkit-appearance:button;*overflow:visible}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}button[disabled]{cursor:default}.btn{display:inline-block;border-radius:2px;line-height:normal;white-space:nowrap;text-align:center;cursor:pointer;font-size:100%;padding:6px 12px 8px;color:#fff;border:1px solid rgba(0,0,0,.1);background-color:#27ae60;text-decoration:none;font-weight:400;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;box-shadow:inset 0 1px 2px -1px hsla(0,0%,100%,.5),inset 0 -2px 0 0 rgba(0,0,0,.1);outline:none;vertical-align:middle;*display:inline;zoom:1;-webkit-user-drag:none;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;-webkit-transition:all .1s linear;-moz-transition:all .1s linear;transition:all .1s linear}.btn-hover{background:#2e8ece;color:#fff}.btn:hover{background:#2cc36b;color:#fff}.btn:focus{background:#2cc36b;outline:0}.btn:active{box-shadow:inset 0 -1px 0 0 rgba(0,0,0,.05),inset 0 2px 0 0 rgba(0,0,0,.1);padding:8px 12px 6px}.btn:visited{color:#fff}.btn-disabled,.btn-disabled:active,.btn-disabled:focus,.btn-disabled:hover,.btn:disabled{background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled = false);filter:alpha(opacity=40);opacity:.4;cursor:not-allowed;box-shadow:none}.btn::-moz-focus-inner{padding:0;border:0}.btn-small{font-size:80%}.btn-info{background-color:#2980b9!important}.btn-info:hover{background-color:#2e8ece!important}.btn-neutral{background-color:#f3f6f6!important;color:#404040!important}.btn-neutral:hover{background-color:#e5ebeb!important;color:#404040}.btn-neutral:visited{color:#404040!important}.btn-success{background-color:#27ae60!important}.btn-success:hover{background-color:#295!important}.btn-danger{background-color:#e74c3c!important}.btn-danger:hover{background-color:#ea6153!important}.btn-warning{background-color:#e67e22!important}.btn-warning:hover{background-color:#e98b39!important}.btn-invert{background-color:#222}.btn-invert:hover{background-color:#2f2f2f!important}.btn-link{background-color:transparent!important;color:#2980b9;box-shadow:none;border-color:transparent!important}.btn-link:active,.btn-link:hover{background-color:transparent!important;color:#409ad5!important;box-shadow:none}.btn-link:visited{color:#9b59b6}.wy-btn-group .btn,.wy-control .btn{vertical-align:middle}.wy-btn-group{margin-bottom:24px;*zoom:1}.wy-btn-group:after,.wy-btn-group:before{display:table;content:""}.wy-btn-group:after{clear:both}.wy-dropdown{position:relative;display:inline-block}.wy-dropdown-active .wy-dropdown-menu{display:block}.wy-dropdown-menu{position:absolute;left:0;display:none;float:left;top:100%;min-width:100%;background:#fcfcfc;z-index:100;border:1px solid #cfd7dd;box-shadow:0 2px 2px 0 rgba(0,0,0,.1);padding:12px}.wy-dropdown-menu>dd>a{display:block;clear:both;color:#404040;white-space:nowrap;font-size:90%;padding:0 12px;cursor:pointer}.wy-dropdown-menu>dd>a:hover{background:#2980b9;color:#fff}.wy-dropdown-menu>dd.divider{border-top:1px solid #cfd7dd;margin:6px
0}.wy-dropdown-menu>dd.search{padding-bottom:12px}.wy-dropdown-menu>dd.search input[type=search]{width:100%}.wy-dropdown-menu>dd.call-to-action{background:#e3e3e3;text-transform:uppercase;font-weight:500;font-size:80%}.wy-dropdown-menu>dd.call-to-action:hover{background:#e3e3e3}.wy-dropdown-menu>dd.call-to-action .btn{color:#fff}.wy-dropdown.wy-dropdown-up .wy-dropdown-menu{bottom:100%;top:auto;left:auto;right:0}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu{background:#fcfcfc;margin-top:2px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a{padding:6px 12px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a:hover{background:#2980b9;color:#fff}.wy-dropdown.wy-dropdown-left .wy-dropdown-menu{right:0;left:auto;text-align:right}.wy-dropdown-arrow:before{content:" ";border-bottom:5px solid #f5f5f5;border-left:5px solid transparent;border-right:5px solid transparent;position:absolute;display:block;top:-4px;left:50%;margin-left:-3px}.wy-dropdown-arrow.wy-dropdown-arrow-left:before{left:11px}.wy-form-stacked select{display:block}.wy-form-aligned .wy-help-inline,.wy-form-aligned input,.wy-form-aligned label,.wy-form-aligned select,.wy-form-aligned textarea{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-form-aligned .wy-control-group>label{display:inline-block;vertical-align:middle;width:10em;margin:6px 12px 0 0;float:left}.wy-form-aligned .wy-control{float:left}.wy-form-aligned .wy-control label{display:block}.wy-form-aligned .wy-control select{margin-top:6px}fieldset{margin:0}fieldset,legend{border:0;padding:0}legend{width:100%;white-space:normal;margin-bottom:24px;font-size:150%;*margin-left:-7px}label,legend{display:block}label{margin:0 0 .3125em;color:#333;font-size:90%}input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}.wy-control-group{margin-bottom:24px;max-width:1200px;margin-left:auto;margin-right:auto;*zoom:1}.wy-control-group:after,.wy-control-group:before{display:table;content:""}.wy-control-group:after{clear:both}.wy-control-group.wy-control-group-required>label:after{content:" *";color:#e74c3c}.wy-control-group .wy-form-full,.wy-control-group .wy-form-halves,.wy-control-group .wy-form-thirds{padding-bottom:12px}.wy-control-group .wy-form-full input[type=color],.wy-control-group .wy-form-full input[type=date],.wy-control-group .wy-form-full input[type=datetime-local],.wy-control-group .wy-form-full input[type=datetime],.wy-control-group .wy-form-full input[type=email],.wy-control-group .wy-form-full input[type=month],.wy-control-group .wy-form-full input[type=number],.wy-control-group .wy-form-full input[type=password],.wy-control-group .wy-form-full input[type=search],.wy-control-group .wy-form-full input[type=tel],.wy-control-group .wy-form-full input[type=text],.wy-control-group .wy-form-full input[type=time],.wy-control-group .wy-form-full input[type=url],.wy-control-group .wy-form-full input[type=week],.wy-control-group .wy-form-full select,.wy-control-group .wy-form-halves input[type=color],.wy-control-group .wy-form-halves input[type=date],.wy-control-group .wy-form-halves input[type=datetime-local],.wy-control-group .wy-form-halves input[type=datetime],.wy-control-group .wy-form-halves input[type=email],.wy-control-group .wy-form-halves input[type=month],.wy-control-group .wy-form-halves input[type=number],.wy-control-group .wy-form-halves input[type=password],.wy-control-group .wy-form-halves input[type=search],.wy-control-group .wy-form-halves input[type=tel],.wy-control-group .wy-form-halves 
input[type=text],.wy-control-group .wy-form-halves input[type=time],.wy-control-group .wy-form-halves input[type=url],.wy-control-group .wy-form-halves input[type=week],.wy-control-group .wy-form-halves select,.wy-control-group .wy-form-thirds input[type=color],.wy-control-group .wy-form-thirds input[type=date],.wy-control-group .wy-form-thirds input[type=datetime-local],.wy-control-group .wy-form-thirds input[type=datetime],.wy-control-group .wy-form-thirds input[type=email],.wy-control-group .wy-form-thirds input[type=month],.wy-control-group .wy-form-thirds input[type=number],.wy-control-group .wy-form-thirds input[type=password],.wy-control-group .wy-form-thirds input[type=search],.wy-control-group .wy-form-thirds input[type=tel],.wy-control-group .wy-form-thirds input[type=text],.wy-control-group .wy-form-thirds input[type=time],.wy-control-group .wy-form-thirds input[type=url],.wy-control-group .wy-form-thirds input[type=week],.wy-control-group .wy-form-thirds select{width:100%}.wy-control-group .wy-form-full{float:left;display:block;width:100%;margin-right:0}.wy-control-group .wy-form-full:last-child{margin-right:0}.wy-control-group .wy-form-halves{float:left;display:block;margin-right:2.35765%;width:48.82117%}.wy-control-group .wy-form-halves:last-child,.wy-control-group .wy-form-halves:nth-of-type(2n){margin-right:0}.wy-control-group .wy-form-halves:nth-of-type(odd){clear:left}.wy-control-group .wy-form-thirds{float:left;display:block;margin-right:2.35765%;width:31.76157%}.wy-control-group .wy-form-thirds:last-child,.wy-control-group .wy-form-thirds:nth-of-type(3n){margin-right:0}.wy-control-group .wy-form-thirds:nth-of-type(3n+1){clear:left}.wy-control-group.wy-control-group-no-input .wy-control,.wy-control-no-input{margin:6px 0 0;font-size:90%}.wy-control-no-input{display:inline-block}.wy-control-group.fluid-input input[type=color],.wy-control-group.fluid-input input[type=date],.wy-control-group.fluid-input input[type=datetime-local],.wy-control-group.fluid-input input[type=datetime],.wy-control-group.fluid-input input[type=email],.wy-control-group.fluid-input input[type=month],.wy-control-group.fluid-input input[type=number],.wy-control-group.fluid-input input[type=password],.wy-control-group.fluid-input input[type=search],.wy-control-group.fluid-input input[type=tel],.wy-control-group.fluid-input input[type=text],.wy-control-group.fluid-input input[type=time],.wy-control-group.fluid-input input[type=url],.wy-control-group.fluid-input input[type=week]{width:100%}.wy-form-message-inline{padding-left:.3em;color:#666;font-size:90%}.wy-form-message{display:block;color:#999;font-size:70%;margin-top:.3125em;font-style:italic}.wy-form-message p{font-size:inherit;font-style:italic;margin-bottom:6px}.wy-form-message p:last-child{margin-bottom:0}input{line-height:normal}input[type=button],input[type=reset],input[type=submit]{-webkit-appearance:button;cursor:pointer;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;*overflow:visible}input[type=color],input[type=date],input[type=datetime-local],input[type=datetime],input[type=email],input[type=month],input[type=number],input[type=password],input[type=search],input[type=tel],input[type=text],input[type=time],input[type=url],input[type=week]{-webkit-appearance:none;padding:6px;display:inline-block;border:1px solid #ccc;font-size:80%;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;box-shadow:inset 0 1px 3px #ddd;border-radius:0;-webkit-transition:border .3s linear;-moz-transition:border .3s linear;transition:border 
.3s linear}input[type=datetime-local]{padding:.34375em .625em}input[disabled]{cursor:default}input[type=checkbox],input[type=radio]{padding:0;margin-right:.3125em;*height:13px;*width:13px}input[type=checkbox],input[type=radio],input[type=search]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}input[type=search]::-webkit-search-cancel-button,input[type=search]::-webkit-search-decoration{-webkit-appearance:none}input[type=color]:focus,input[type=date]:focus,input[type=datetime-local]:focus,input[type=datetime]:focus,input[type=email]:focus,input[type=month]:focus,input[type=number]:focus,input[type=password]:focus,input[type=search]:focus,input[type=tel]:focus,input[type=text]:focus,input[type=time]:focus,input[type=url]:focus,input[type=week]:focus{outline:0;outline:thin dotted\9;border-color:#333}input.no-focus:focus{border-color:#ccc!important}input[type=checkbox]:focus,input[type=file]:focus,input[type=radio]:focus{outline:thin dotted #333;outline:1px auto #129fea}input[type=color][disabled],input[type=date][disabled],input[type=datetime-local][disabled],input[type=datetime][disabled],input[type=email][disabled],input[type=month][disabled],input[type=number][disabled],input[type=password][disabled],input[type=search][disabled],input[type=tel][disabled],input[type=text][disabled],input[type=time][disabled],input[type=url][disabled],input[type=week][disabled]{cursor:not-allowed;background-color:#fafafa}input:focus:invalid,select:focus:invalid,textarea:focus:invalid{color:#e74c3c;border:1px solid #e74c3c}input:focus:invalid:focus,select:focus:invalid:focus,textarea:focus:invalid:focus{border-color:#e74c3c}input[type=checkbox]:focus:invalid:focus,input[type=file]:focus:invalid:focus,input[type=radio]:focus:invalid:focus{outline-color:#e74c3c}input.wy-input-large{padding:12px;font-size:100%}textarea{overflow:auto;vertical-align:top;width:100%;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif}select,textarea{padding:.5em .625em;display:inline-block;border:1px solid #ccc;font-size:80%;box-shadow:inset 0 1px 3px #ddd;-webkit-transition:border .3s linear;-moz-transition:border .3s linear;transition:border .3s linear}select{border:1px solid #ccc;background-color:#fff}select[multiple]{height:auto}select:focus,textarea:focus{outline:0}input[readonly],select[disabled],select[readonly],textarea[disabled],textarea[readonly]{cursor:not-allowed;background-color:#fafafa}input[type=checkbox][disabled],input[type=radio][disabled]{cursor:not-allowed}.wy-checkbox,.wy-radio{margin:6px 0;color:#404040;display:block}.wy-checkbox input,.wy-radio input{vertical-align:baseline}.wy-form-message-inline{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-input-prefix,.wy-input-suffix{white-space:nowrap;padding:6px}.wy-input-prefix .wy-input-context,.wy-input-suffix .wy-input-context{line-height:27px;padding:0 8px;display:inline-block;font-size:80%;background-color:#f3f6f6;border:1px solid #ccc;color:#999}.wy-input-suffix .wy-input-context{border-left:0}.wy-input-prefix .wy-input-context{border-right:0}.wy-switch{position:relative;display:block;height:24px;margin-top:12px;cursor:pointer}.wy-switch:before{left:0;top:0;width:36px;height:12px;background:#ccc}.wy-switch:after,.wy-switch:before{position:absolute;content:"";display:block;border-radius:4px;-webkit-transition:all .2s ease-in-out;-moz-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.wy-switch:after{width:18px;height:18px;background:#999;left:-3px;top:-3px}.wy-switch 
span{position:absolute;left:48px;display:block;font-size:12px;color:#ccc;line-height:1}.wy-switch.active:before{background:#1e8449}.wy-switch.active:after{left:24px;background:#27ae60}.wy-switch.disabled{cursor:not-allowed;opacity:.8}.wy-control-group.wy-control-group-error .wy-form-message,.wy-control-group.wy-control-group-error>label{color:#e74c3c}.wy-control-group.wy-control-group-error input[type=color],.wy-control-group.wy-control-group-error input[type=date],.wy-control-group.wy-control-group-error input[type=datetime-local],.wy-control-group.wy-control-group-error input[type=datetime],.wy-control-group.wy-control-group-error input[type=email],.wy-control-group.wy-control-group-error input[type=month],.wy-control-group.wy-control-group-error input[type=number],.wy-control-group.wy-control-group-error input[type=password],.wy-control-group.wy-control-group-error input[type=search],.wy-control-group.wy-control-group-error input[type=tel],.wy-control-group.wy-control-group-error input[type=text],.wy-control-group.wy-control-group-error input[type=time],.wy-control-group.wy-control-group-error input[type=url],.wy-control-group.wy-control-group-error input[type=week],.wy-control-group.wy-control-group-error textarea{border:1px solid #e74c3c}.wy-inline-validate{white-space:nowrap}.wy-inline-validate .wy-input-context{padding:.5em .625em;display:inline-block;font-size:80%}.wy-inline-validate.wy-inline-validate-success .wy-input-context{color:#27ae60}.wy-inline-validate.wy-inline-validate-danger .wy-input-context{color:#e74c3c}.wy-inline-validate.wy-inline-validate-warning .wy-input-context{color:#e67e22}.wy-inline-validate.wy-inline-validate-info .wy-input-context{color:#2980b9}.rotate-90{-webkit-transform:rotate(90deg);-moz-transform:rotate(90deg);-ms-transform:rotate(90deg);-o-transform:rotate(90deg);transform:rotate(90deg)}.rotate-180{-webkit-transform:rotate(180deg);-moz-transform:rotate(180deg);-ms-transform:rotate(180deg);-o-transform:rotate(180deg);transform:rotate(180deg)}.rotate-270{-webkit-transform:rotate(270deg);-moz-transform:rotate(270deg);-ms-transform:rotate(270deg);-o-transform:rotate(270deg);transform:rotate(270deg)}.mirror{-webkit-transform:scaleX(-1);-moz-transform:scaleX(-1);-ms-transform:scaleX(-1);-o-transform:scaleX(-1);transform:scaleX(-1)}.mirror.rotate-90{-webkit-transform:scaleX(-1) rotate(90deg);-moz-transform:scaleX(-1) rotate(90deg);-ms-transform:scaleX(-1) rotate(90deg);-o-transform:scaleX(-1) rotate(90deg);transform:scaleX(-1) rotate(90deg)}.mirror.rotate-180{-webkit-transform:scaleX(-1) rotate(180deg);-moz-transform:scaleX(-1) rotate(180deg);-ms-transform:scaleX(-1) rotate(180deg);-o-transform:scaleX(-1) rotate(180deg);transform:scaleX(-1) rotate(180deg)}.mirror.rotate-270{-webkit-transform:scaleX(-1) rotate(270deg);-moz-transform:scaleX(-1) rotate(270deg);-ms-transform:scaleX(-1) rotate(270deg);-o-transform:scaleX(-1) rotate(270deg);transform:scaleX(-1) rotate(270deg)}@media only screen and (max-width:480px){.wy-form button[type=submit]{margin:.7em 0 0}.wy-form input[type=color],.wy-form input[type=date],.wy-form input[type=datetime-local],.wy-form input[type=datetime],.wy-form input[type=email],.wy-form input[type=month],.wy-form input[type=number],.wy-form input[type=password],.wy-form input[type=search],.wy-form input[type=tel],.wy-form input[type=text],.wy-form input[type=time],.wy-form input[type=url],.wy-form input[type=week],.wy-form label{margin-bottom:.3em;display:block}.wy-form input[type=color],.wy-form input[type=date],.wy-form 
input[type=datetime-local],.wy-form input[type=datetime],.wy-form input[type=email],.wy-form input[type=month],.wy-form input[type=number],.wy-form input[type=password],.wy-form input[type=search],.wy-form input[type=tel],.wy-form input[type=time],.wy-form input[type=url],.wy-form input[type=week]{margin-bottom:0}.wy-form-aligned .wy-control-group label{margin-bottom:.3em;text-align:left;display:block;width:100%}.wy-form-aligned .wy-control{margin:1.5em 0 0}.wy-form-message,.wy-form-message-inline,.wy-form .wy-help-inline{display:block;font-size:80%;padding:6px 0}}@media screen and (max-width:768px){.tablet-hide{display:none}}@media screen and (max-width:480px){.mobile-hide{display:none}}.float-left{float:left}.float-right{float:right}.full-width{width:100%}.rst-content table.docutils,.rst-content table.field-list,.wy-table{border-collapse:collapse;border-spacing:0;empty-cells:show;margin-bottom:24px}.rst-content table.docutils caption,.rst-content table.field-list caption,.wy-table caption{color:#000;font:italic 85%/1 arial,sans-serif;padding:1em 0;text-align:center}.rst-content table.docutils td,.rst-content table.docutils th,.rst-content table.field-list td,.rst-content table.field-list th,.wy-table td,.wy-table th{font-size:90%;margin:0;overflow:visible;padding:8px 16px}.rst-content table.docutils td:first-child,.rst-content table.docutils th:first-child,.rst-content table.field-list td:first-child,.rst-content table.field-list th:first-child,.wy-table td:first-child,.wy-table th:first-child{border-left-width:0}.rst-content table.docutils thead,.rst-content table.field-list thead,.wy-table thead{color:#000;text-align:left;vertical-align:bottom;white-space:nowrap}.rst-content table.docutils thead th,.rst-content table.field-list thead th,.wy-table thead th{font-weight:700;border-bottom:2px solid #e1e4e5}.rst-content table.docutils td,.rst-content table.field-list td,.wy-table td{background-color:transparent;vertical-align:middle}.rst-content table.docutils td p,.rst-content table.field-list td p,.wy-table td p{line-height:18px}.rst-content table.docutils td p:last-child,.rst-content table.field-list td p:last-child,.wy-table td p:last-child{margin-bottom:0}.rst-content table.docutils .wy-table-cell-min,.rst-content table.field-list .wy-table-cell-min,.wy-table .wy-table-cell-min{width:1%;padding-right:0}.rst-content table.docutils .wy-table-cell-min input[type=checkbox],.rst-content table.field-list .wy-table-cell-min input[type=checkbox],.wy-table .wy-table-cell-min input[type=checkbox]{margin:0}.wy-table-secondary{color:grey;font-size:90%}.wy-table-tertiary{color:grey;font-size:80%}.rst-content table.docutils:not(.field-list) tr:nth-child(2n-1) td,.wy-table-backed,.wy-table-odd td,.wy-table-striped tr:nth-child(2n-1) td{background-color:#f3f6f6}.rst-content table.docutils,.wy-table-bordered-all{border:1px solid #e1e4e5}.rst-content table.docutils td,.wy-table-bordered-all td{border-bottom:1px solid #e1e4e5;border-left:1px solid #e1e4e5}.rst-content table.docutils tbody>tr:last-child td,.wy-table-bordered-all tbody>tr:last-child td{border-bottom-width:0}.wy-table-bordered{border:1px solid #e1e4e5}.wy-table-bordered-rows td{border-bottom:1px solid #e1e4e5}.wy-table-bordered-rows tbody>tr:last-child td{border-bottom-width:0}.wy-table-horizontal td,.wy-table-horizontal th{border-width:0 0 1px;border-bottom:1px solid #e1e4e5}.wy-table-horizontal tbody>tr:last-child td{border-bottom-width:0}.wy-table-responsive{margin-bottom:24px;max-width:100%;overflow:auto}.wy-table-responsive 
table{margin-bottom:0!important}.wy-table-responsive table td,.wy-table-responsive table th{white-space:nowrap}a{color:#2980b9;text-decoration:none;cursor:pointer}a:hover{color:#3091d1}a:visited{color:#9b59b6}html{height:100%}body,html{overflow-x:hidden}body{font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;font-weight:400;color:#404040;min-height:100%;background:#edf0f2}.wy-text-left{text-align:left}.wy-text-center{text-align:center}.wy-text-right{text-align:right}.wy-text-large{font-size:120%}.wy-text-normal{font-size:100%}.wy-text-small,small{font-size:80%}.wy-text-strike{text-decoration:line-through}.wy-text-warning{color:#e67e22!important}a.wy-text-warning:hover{color:#eb9950!important}.wy-text-info{color:#2980b9!important}a.wy-text-info:hover{color:#409ad5!important}.wy-text-success{color:#27ae60!important}a.wy-text-success:hover{color:#36d278!important}.wy-text-danger{color:#e74c3c!important}a.wy-text-danger:hover{color:#ed7669!important}.wy-text-neutral{color:#404040!important}a.wy-text-neutral:hover{color:#595959!important}.rst-content .toctree-wrapper>p.caption,h1,h2,h3,h4,h5,h6,legend{margin-top:0;font-weight:700;font-family:Roboto Slab,ff-tisa-web-pro,Georgia,Arial,sans-serif}p{line-height:24px;font-size:16px;margin:0 0 24px}h1{font-size:175%}.rst-content .toctree-wrapper>p.caption,h2{font-size:150%}h3{font-size:125%}h4{font-size:115%}h5{font-size:110%}h6{font-size:100%}hr{display:block;height:1px;border:0;border-top:1px solid #e1e4e5;margin:24px 0;padding:0}.rst-content code,.rst-content tt,code{white-space:nowrap;max-width:100%;background:#fff;border:1px solid #e1e4e5;font-size:75%;padding:0 5px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;color:#e74c3c;overflow-x:auto}.rst-content tt.code-large,code.code-large{font-size:90%}.rst-content .section ul,.rst-content .toctree-wrapper ul,.rst-content section ul,.wy-plain-list-disc,article ul{list-style:disc;line-height:24px;margin-bottom:24px}.rst-content .section ul li,.rst-content .toctree-wrapper ul li,.rst-content section ul li,.wy-plain-list-disc li,article ul li{list-style:disc;margin-left:24px}.rst-content .section ul li p:last-child,.rst-content .section ul li ul,.rst-content .toctree-wrapper ul li p:last-child,.rst-content .toctree-wrapper ul li ul,.rst-content section ul li p:last-child,.rst-content section ul li ul,.wy-plain-list-disc li p:last-child,.wy-plain-list-disc li ul,article ul li p:last-child,article ul li ul{margin-bottom:0}.rst-content .section ul li li,.rst-content .toctree-wrapper ul li li,.rst-content section ul li li,.wy-plain-list-disc li li,article ul li li{list-style:circle}.rst-content .section ul li li li,.rst-content .toctree-wrapper ul li li li,.rst-content section ul li li li,.wy-plain-list-disc li li li,article ul li li li{list-style:square}.rst-content .section ul li ol li,.rst-content .toctree-wrapper ul li ol li,.rst-content section ul li ol li,.wy-plain-list-disc li ol li,article ul li ol li{list-style:decimal}.rst-content .section ol,.rst-content .section ol.arabic,.rst-content .toctree-wrapper ol,.rst-content .toctree-wrapper ol.arabic,.rst-content section ol,.rst-content section ol.arabic,.wy-plain-list-decimal,article ol{list-style:decimal;line-height:24px;margin-bottom:24px}.rst-content .section ol.arabic li,.rst-content .section ol li,.rst-content .toctree-wrapper ol.arabic li,.rst-content .toctree-wrapper ol li,.rst-content section ol.arabic li,.rst-content section ol li,.wy-plain-list-decimal li,article ol 
li{list-style:decimal;margin-left:24px}.rst-content .section ol.arabic li ul,.rst-content .section ol li p:last-child,.rst-content .section ol li ul,.rst-content .toctree-wrapper ol.arabic li ul,.rst-content .toctree-wrapper ol li p:last-child,.rst-content .toctree-wrapper ol li ul,.rst-content section ol.arabic li ul,.rst-content section ol li p:last-child,.rst-content section ol li ul,.wy-plain-list-decimal li p:last-child,.wy-plain-list-decimal li ul,article ol li p:last-child,article ol li ul{margin-bottom:0}.rst-content .section ol.arabic li ul li,.rst-content .section ol li ul li,.rst-content .toctree-wrapper ol.arabic li ul li,.rst-content .toctree-wrapper ol li ul li,.rst-content section ol.arabic li ul li,.rst-content section ol li ul li,.wy-plain-list-decimal li ul li,article ol li ul li{list-style:disc}.wy-breadcrumbs{*zoom:1}.wy-breadcrumbs:after,.wy-breadcrumbs:before{display:table;content:""}.wy-breadcrumbs:after{clear:both}.wy-breadcrumbs>li{display:inline-block;padding-top:5px}.wy-breadcrumbs>li.wy-breadcrumbs-aside{float:right}.rst-content .wy-breadcrumbs>li code,.rst-content .wy-breadcrumbs>li tt,.wy-breadcrumbs>li .rst-content tt,.wy-breadcrumbs>li code{all:inherit;color:inherit}.breadcrumb-item:before{content:"/";color:#bbb;font-size:13px;padding:0 6px 0 3px}.wy-breadcrumbs-extra{margin-bottom:0;color:#b3b3b3;font-size:80%;display:inline-block}@media screen and (max-width:480px){.wy-breadcrumbs-extra,.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}@media print{.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}html{font-size:16px}.wy-affix{position:fixed;top:1.618em}.wy-menu a:hover{text-decoration:none}.wy-menu-horiz{*zoom:1}.wy-menu-horiz:after,.wy-menu-horiz:before{display:table;content:""}.wy-menu-horiz:after{clear:both}.wy-menu-horiz li,.wy-menu-horiz ul{display:inline-block}.wy-menu-horiz li:hover{background:hsla(0,0%,100%,.1)}.wy-menu-horiz li.divide-left{border-left:1px solid #404040}.wy-menu-horiz li.divide-right{border-right:1px solid #404040}.wy-menu-horiz a{height:32px;display:inline-block;line-height:32px;padding:0 16px}.wy-menu-vertical{width:300px}.wy-menu-vertical header,.wy-menu-vertical p.caption{color:#55a5d9;height:32px;line-height:32px;padding:0 1.618em;margin:12px 0 0;display:block;font-weight:700;text-transform:uppercase;font-size:85%;white-space:nowrap}.wy-menu-vertical ul{margin-bottom:0}.wy-menu-vertical li.divide-top{border-top:1px solid #404040}.wy-menu-vertical li.divide-bottom{border-bottom:1px solid #404040}.wy-menu-vertical li.current{background:#e3e3e3}.wy-menu-vertical li.current a{color:grey;border-right:1px solid #c9c9c9;padding:.4045em 2.427em}.wy-menu-vertical li.current a:hover{background:#d6d6d6}.rst-content .wy-menu-vertical li tt,.wy-menu-vertical li .rst-content tt,.wy-menu-vertical li code{border:none;background:inherit;color:inherit;padding-left:0;padding-right:0}.wy-menu-vertical li button.toctree-expand{display:block;float:left;margin-left:-1.2em;line-height:18px;color:#4d4d4d;border:none;background:none;padding:0}.wy-menu-vertical li.current>a,.wy-menu-vertical li.on a{color:#404040;font-weight:700;position:relative;background:#fcfcfc;border:none;padding:.4045em 1.618em}.wy-menu-vertical li.current>a:hover,.wy-menu-vertical li.on a:hover{background:#fcfcfc}.wy-menu-vertical li.current>a:hover button.toctree-expand,.wy-menu-vertical li.on a:hover button.toctree-expand{color:grey}.wy-menu-vertical li.current>a button.toctree-expand,.wy-menu-vertical li.on a 
button.toctree-expand{display:block;line-height:18px;color:#333}.wy-menu-vertical li.toctree-l1.current>a{border-bottom:1px solid #c9c9c9;border-top:1px solid #c9c9c9}.wy-menu-vertical .toctree-l1.current .toctree-l2>ul,.wy-menu-vertical .toctree-l2.current .toctree-l3>ul,.wy-menu-vertical .toctree-l3.current .toctree-l4>ul,.wy-menu-vertical .toctree-l4.current .toctree-l5>ul,.wy-menu-vertical .toctree-l5.current .toctree-l6>ul,.wy-menu-vertical .toctree-l6.current .toctree-l7>ul,.wy-menu-vertical .toctree-l7.current .toctree-l8>ul,.wy-menu-vertical .toctree-l8.current .toctree-l9>ul,.wy-menu-vertical .toctree-l9.current .toctree-l10>ul,.wy-menu-vertical .toctree-l10.current .toctree-l11>ul{display:none}.wy-menu-vertical .toctree-l1.current .current.toctree-l2>ul,.wy-menu-vertical .toctree-l2.current .current.toctree-l3>ul,.wy-menu-vertical .toctree-l3.current .current.toctree-l4>ul,.wy-menu-vertical .toctree-l4.current .current.toctree-l5>ul,.wy-menu-vertical .toctree-l5.current .current.toctree-l6>ul,.wy-menu-vertical .toctree-l6.current .current.toctree-l7>ul,.wy-menu-vertical .toctree-l7.current .current.toctree-l8>ul,.wy-menu-vertical .toctree-l8.current .current.toctree-l9>ul,.wy-menu-vertical .toctree-l9.current .current.toctree-l10>ul,.wy-menu-vertical .toctree-l10.current .current.toctree-l11>ul{display:block}.wy-menu-vertical li.toctree-l3,.wy-menu-vertical li.toctree-l4{font-size:.9em}.wy-menu-vertical li.toctree-l2 a,.wy-menu-vertical li.toctree-l3 a,.wy-menu-vertical li.toctree-l4 a,.wy-menu-vertical li.toctree-l5 a,.wy-menu-vertical li.toctree-l6 a,.wy-menu-vertical li.toctree-l7 a,.wy-menu-vertical li.toctree-l8 a,.wy-menu-vertical li.toctree-l9 a,.wy-menu-vertical li.toctree-l10 a{color:#404040}.wy-menu-vertical li.toctree-l2 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l3 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l4 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l5 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l6 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l7 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l8 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l9 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l10 a:hover button.toctree-expand{color:grey}.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a,.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a,.wy-menu-vertical li.toctree-l4.current li.toctree-l5>a,.wy-menu-vertical li.toctree-l5.current li.toctree-l6>a,.wy-menu-vertical li.toctree-l6.current li.toctree-l7>a,.wy-menu-vertical li.toctree-l7.current li.toctree-l8>a,.wy-menu-vertical li.toctree-l8.current li.toctree-l9>a,.wy-menu-vertical li.toctree-l9.current li.toctree-l10>a,.wy-menu-vertical li.toctree-l10.current li.toctree-l11>a{display:block}.wy-menu-vertical li.toctree-l2.current>a{padding:.4045em 2.427em}.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a{padding:.4045em 1.618em .4045em 4.045em}.wy-menu-vertical li.toctree-l3.current>a{padding:.4045em 4.045em}.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{padding:.4045em 1.618em .4045em 5.663em}.wy-menu-vertical li.toctree-l4.current>a{padding:.4045em 5.663em}.wy-menu-vertical li.toctree-l4.current li.toctree-l5>a{padding:.4045em 1.618em .4045em 7.281em}.wy-menu-vertical li.toctree-l5.current>a{padding:.4045em 7.281em}.wy-menu-vertical li.toctree-l5.current li.toctree-l6>a{padding:.4045em 1.618em .4045em 8.899em}.wy-menu-vertical li.toctree-l6.current>a{padding:.4045em 
8.899em}.wy-menu-vertical li.toctree-l6.current li.toctree-l7>a{padding:.4045em 1.618em .4045em 10.517em}.wy-menu-vertical li.toctree-l7.current>a{padding:.4045em 10.517em}.wy-menu-vertical li.toctree-l7.current li.toctree-l8>a{padding:.4045em 1.618em .4045em 12.135em}.wy-menu-vertical li.toctree-l8.current>a{padding:.4045em 12.135em}.wy-menu-vertical li.toctree-l8.current li.toctree-l9>a{padding:.4045em 1.618em .4045em 13.753em}.wy-menu-vertical li.toctree-l9.current>a{padding:.4045em 13.753em}.wy-menu-vertical li.toctree-l9.current li.toctree-l10>a{padding:.4045em 1.618em .4045em 15.371em}.wy-menu-vertical li.toctree-l10.current>a{padding:.4045em 15.371em}.wy-menu-vertical li.toctree-l10.current li.toctree-l11>a{padding:.4045em 1.618em .4045em 16.989em}.wy-menu-vertical li.toctree-l2.current>a,.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a{background:#c9c9c9}.wy-menu-vertical li.toctree-l2 button.toctree-expand{color:#a3a3a3}.wy-menu-vertical li.toctree-l3.current>a,.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{background:#bdbdbd}.wy-menu-vertical li.toctree-l3 button.toctree-expand{color:#969696}.wy-menu-vertical li.current ul{display:block}.wy-menu-vertical li ul{margin-bottom:0;display:none}.wy-menu-vertical li ul li a{margin-bottom:0;color:#d9d9d9;font-weight:400}.wy-menu-vertical a{line-height:18px;padding:.4045em 1.618em;display:block;position:relative;font-size:90%;color:#d9d9d9}.wy-menu-vertical a:hover{background-color:#4e4a4a;cursor:pointer}.wy-menu-vertical a:hover button.toctree-expand{color:#d9d9d9}.wy-menu-vertical a:active{background-color:#2980b9;cursor:pointer;color:#fff}.wy-menu-vertical a:active button.toctree-expand{color:#fff}.wy-side-nav-search{display:block;width:300px;padding:.809em;margin-bottom:.809em;z-index:200;background-color:#2980b9;text-align:center;color:#fcfcfc}.wy-side-nav-search input[type=text]{width:100%;border-radius:50px;padding:6px 12px;border-color:#2472a4}.wy-side-nav-search img{display:block;margin:auto auto .809em;height:45px;width:45px;background-color:#2980b9;padding:5px;border-radius:100%}.wy-side-nav-search .wy-dropdown>a,.wy-side-nav-search>a{color:#fcfcfc;font-size:100%;font-weight:700;display:inline-block;padding:4px 6px;margin-bottom:.809em;max-width:100%}.wy-side-nav-search .wy-dropdown>a:hover,.wy-side-nav-search>a:hover{background:hsla(0,0%,100%,.1)}.wy-side-nav-search .wy-dropdown>a img.logo,.wy-side-nav-search>a img.logo{display:block;margin:0 auto;height:auto;width:auto;border-radius:0;max-width:100%;background:transparent}.wy-side-nav-search .wy-dropdown>a.icon img.logo,.wy-side-nav-search>a.icon img.logo{margin-top:.85em}.wy-side-nav-search>div.version{margin-top:-.4045em;margin-bottom:.809em;font-weight:400;color:hsla(0,0%,100%,.3)}.wy-nav .wy-menu-vertical header{color:#2980b9}.wy-nav .wy-menu-vertical a{color:#b3b3b3}.wy-nav .wy-menu-vertical a:hover{background-color:#2980b9;color:#fff}[data-menu-wrap]{-webkit-transition:all .2s ease-in;-moz-transition:all .2s ease-in;transition:all .2s 
ease-in;position:absolute;width:100%;opacity:0}[data-menu-wrap].move-center{left:0;right:auto;opacity:1}[data-menu-wrap].move-left{right:auto;left:-100%;opacity:0}[data-menu-wrap].move-right{right:-100%;left:auto;opacity:0}.wy-body-for-nav{background:#fcfcfc}.wy-grid-for-nav{position:absolute;width:100%;height:100%}.wy-nav-side{position:fixed;top:0;bottom:0;left:0;padding-bottom:2em;width:300px;overflow-x:hidden;overflow-y:hidden;min-height:100%;color:#9b9b9b;background:#343131;z-index:200}.wy-side-scroll{width:320px;position:relative;overflow-x:hidden;overflow-y:scroll;height:100%}.wy-nav-top{display:none;background:#2980b9;color:#fff;padding:.4045em .809em;position:relative;line-height:50px;text-align:center;font-size:100%;*zoom:1}.wy-nav-top:after,.wy-nav-top:before{display:table;content:""}.wy-nav-top:after{clear:both}.wy-nav-top a{color:#fff;font-weight:700}.wy-nav-top img{margin-right:12px;height:45px;width:45px;background-color:#2980b9;padding:5px;border-radius:100%}.wy-nav-top i{font-size:30px;float:left;cursor:pointer;padding-top:inherit}.wy-nav-content-wrap{margin-left:300px;background:#fcfcfc;min-height:100%}.wy-nav-content{padding:1.618em 3.236em;height:100%;max-width:800px;margin:auto}.wy-body-mask{position:fixed;width:100%;height:100%;background:rgba(0,0,0,.2);display:none;z-index:499}.wy-body-mask.on{display:block}footer{color:grey}footer p{margin-bottom:12px}.rst-content footer span.commit tt,footer span.commit .rst-content tt,footer span.commit code{padding:0;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;font-size:1em;background:none;border:none;color:grey}.rst-footer-buttons{*zoom:1}.rst-footer-buttons:after,.rst-footer-buttons:before{width:100%;display:table;content:""}.rst-footer-buttons:after{clear:both}.rst-breadcrumbs-buttons{margin-top:12px;*zoom:1}.rst-breadcrumbs-buttons:after,.rst-breadcrumbs-buttons:before{display:table;content:""}.rst-breadcrumbs-buttons:after{clear:both}#search-results .search li{margin-bottom:24px;border-bottom:1px solid #e1e4e5;padding-bottom:24px}#search-results .search li:first-child{border-top:1px solid #e1e4e5;padding-top:24px}#search-results .search li a{font-size:120%;margin-bottom:12px;display:inline-block}#search-results .context{color:grey;font-size:90%}.genindextable li>ul{margin-left:24px}@media screen and (max-width:768px){.wy-body-for-nav{background:#fcfcfc}.wy-nav-top{display:block}.wy-nav-side{left:-300px}.wy-nav-side.shift{width:85%;left:0}.wy-menu.wy-menu-vertical,.wy-side-nav-search,.wy-side-scroll{width:auto}.wy-nav-content-wrap{margin-left:0}.wy-nav-content-wrap .wy-nav-content{padding:1.618em}.wy-nav-content-wrap.shift{position:fixed;min-width:100%;left:85%;top:0;height:100%;overflow:hidden}}@media screen and (min-width:1100px){.wy-nav-content-wrap{background:rgba(0,0,0,.05)}.wy-nav-content{margin:0;background:#fcfcfc}}@media print{.rst-versions,.wy-nav-side,footer{display:none}.wy-nav-content-wrap{margin-left:0}}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions a{color:#2980b9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27ae60;*zoom:1}.rst-versions .rst-current-version:after,.rst-versions .rst-current-version:before{display:table;content:""}.rst-versions
.rst-current-version:after{clear:both}.rst-content .code-block-caption .rst-versions .rst-current-version .headerlink,.rst-content .eqno .rst-versions .rst-current-version .headerlink,.rst-content .rst-versions .rst-current-version .admonition-title,.rst-content code.download .rst-versions .rst-current-version span:first-child,.rst-content dl dt .rst-versions .rst-current-version .headerlink,.rst-content h1 .rst-versions .rst-current-version .headerlink,.rst-content h2 .rst-versions .rst-current-version .headerlink,.rst-content h3 .rst-versions .rst-current-version .headerlink,.rst-content h4 .rst-versions .rst-current-version .headerlink,.rst-content h5 .rst-versions .rst-current-version .headerlink,.rst-content h6 .rst-versions .rst-current-version .headerlink,.rst-content p .rst-versions .rst-current-version .headerlink,.rst-content table>caption .rst-versions .rst-current-version .headerlink,.rst-content tt.download .rst-versions .rst-current-version span:first-child,.rst-versions .rst-current-version .fa,.rst-versions .rst-current-version .icon,.rst-versions .rst-current-version .rst-content .admonition-title,.rst-versions .rst-current-version .rst-content .code-block-caption .headerlink,.rst-versions .rst-current-version .rst-content .eqno .headerlink,.rst-versions .rst-current-version .rst-content code.download span:first-child,.rst-versions .rst-current-version .rst-content dl dt .headerlink,.rst-versions .rst-current-version .rst-content h1 .headerlink,.rst-versions .rst-current-version .rst-content h2 .headerlink,.rst-versions .rst-current-version .rst-content h3 .headerlink,.rst-versions .rst-current-version .rst-content h4 .headerlink,.rst-versions .rst-current-version .rst-content h5 .headerlink,.rst-versions .rst-current-version .rst-content h6 .headerlink,.rst-versions .rst-current-version .rst-content p .headerlink,.rst-versions .rst-current-version .rst-content table>caption .headerlink,.rst-versions .rst-current-version .rst-content tt.download span:first-child,.rst-versions .rst-current-version .wy-menu-vertical li button.toctree-expand,.wy-menu-vertical li .rst-versions .rst-current-version button.toctree-expand{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and 
(max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}}.rst-content .toctree-wrapper>p.caption,.rst-content h1,.rst-content h2,.rst-content h3,.rst-content h4,.rst-content h5,.rst-content h6{margin-bottom:24px}.rst-content img{max-width:100%;height:auto}.rst-content div.figure,.rst-content figure{margin-bottom:24px}.rst-content div.figure .caption-text,.rst-content figure .caption-text{font-style:italic}.rst-content div.figure p:last-child.caption,.rst-content figure p:last-child.caption{margin-bottom:0}.rst-content div.figure.align-center,.rst-content figure.align-center{text-align:center}.rst-content .section>a>img,.rst-content .section>img,.rst-content section>a>img,.rst-content section>img{margin-bottom:24px}.rst-content abbr[title]{text-decoration:none}.rst-content.style-external-links a.reference.external:after{font-family:FontAwesome;content:"\f08e";color:#b3b3b3;vertical-align:super;font-size:60%;margin:0 .2em}.rst-content blockquote{margin-left:24px;line-height:24px;margin-bottom:24px}.rst-content pre.literal-block{white-space:pre;margin:0;padding:12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;display:block;overflow:auto}.rst-content div[class^=highlight],.rst-content pre.literal-block{border:1px solid #e1e4e5;overflow-x:auto;margin:1px 0 24px}.rst-content div[class^=highlight] div[class^=highlight],.rst-content pre.literal-block div[class^=highlight]{padding:0;border:none;margin:0}.rst-content div[class^=highlight] td.code{width:100%}.rst-content .linenodiv pre{border-right:1px solid #e6e9ea;margin:0;padding:12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;user-select:none;pointer-events:none}.rst-content div[class^=highlight] pre{white-space:pre;margin:0;padding:12px;display:block;overflow:auto}.rst-content div[class^=highlight] pre .hll{display:block;margin:0 -12px;padding:0 12px}.rst-content .linenodiv pre,.rst-content div[class^=highlight] pre,.rst-content pre.literal-block{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;font-size:12px;line-height:1.4}.rst-content div.highlight .gp,.rst-content div.highlight span.linenos{user-select:none;pointer-events:none}.rst-content div.highlight span.linenos{display:inline-block;padding-left:0;padding-right:12px;margin-right:12px;border-right:1px solid #e6e9ea}.rst-content .code-block-caption{font-style:italic;font-size:85%;line-height:1;padding:1em 0;text-align:center}@media print{.rst-content .codeblock,.rst-content div[class^=highlight],.rst-content div[class^=highlight] pre{white-space:pre-wrap}}.rst-content .admonition,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning{clear:both}.rst-content .admonition-todo .last,.rst-content .admonition-todo>:last-child,.rst-content .admonition .last,.rst-content .admonition>:last-child,.rst-content .attention .last,.rst-content .attention>:last-child,.rst-content .caution .last,.rst-content .caution>:last-child,.rst-content .danger .last,.rst-content .danger>:last-child,.rst-content .error .last,.rst-content .error>:last-child,.rst-content .hint .last,.rst-content .hint>:last-child,.rst-content .important .last,.rst-content .important>:last-child,.rst-content .note .last,.rst-content .note>:last-child,.rst-content .seealso 
.last,.rst-content .seealso>:last-child,.rst-content .tip .last,.rst-content .tip>:last-child,.rst-content .warning .last,.rst-content .warning>:last-child{margin-bottom:0}.rst-content .admonition-title:before{margin-right:4px}.rst-content .admonition table{border-color:rgba(0,0,0,.1)}.rst-content .admonition table td,.rst-content .admonition table th{background:transparent!important;border-color:rgba(0,0,0,.1)!important}.rst-content .section ol.loweralpha,.rst-content .section ol.loweralpha>li,.rst-content .toctree-wrapper ol.loweralpha,.rst-content .toctree-wrapper ol.loweralpha>li,.rst-content section ol.loweralpha,.rst-content section ol.loweralpha>li{list-style:lower-alpha}.rst-content .section ol.upperalpha,.rst-content .section ol.upperalpha>li,.rst-content .toctree-wrapper ol.upperalpha,.rst-content .toctree-wrapper ol.upperalpha>li,.rst-content section ol.upperalpha,.rst-content section ol.upperalpha>li{list-style:upper-alpha}.rst-content .section ol li>*,.rst-content .section ul li>*,.rst-content .toctree-wrapper ol li>*,.rst-content .toctree-wrapper ul li>*,.rst-content section ol li>*,.rst-content section ul li>*{margin-top:12px;margin-bottom:12px}.rst-content .section ol li>:first-child,.rst-content .section ul li>:first-child,.rst-content .toctree-wrapper ol li>:first-child,.rst-content .toctree-wrapper ul li>:first-child,.rst-content section ol li>:first-child,.rst-content section ul li>:first-child{margin-top:0}.rst-content .section ol li>p,.rst-content .section ol li>p:last-child,.rst-content .section ul li>p,.rst-content .section ul li>p:last-child,.rst-content .toctree-wrapper ol li>p,.rst-content .toctree-wrapper ol li>p:last-child,.rst-content .toctree-wrapper ul li>p,.rst-content .toctree-wrapper ul li>p:last-child,.rst-content section ol li>p,.rst-content section ol li>p:last-child,.rst-content section ul li>p,.rst-content section ul li>p:last-child{margin-bottom:12px}.rst-content .section ol li>p:only-child,.rst-content .section ol li>p:only-child:last-child,.rst-content .section ul li>p:only-child,.rst-content .section ul li>p:only-child:last-child,.rst-content .toctree-wrapper ol li>p:only-child,.rst-content .toctree-wrapper ol li>p:only-child:last-child,.rst-content .toctree-wrapper ul li>p:only-child,.rst-content .toctree-wrapper ul li>p:only-child:last-child,.rst-content section ol li>p:only-child,.rst-content section ol li>p:only-child:last-child,.rst-content section ul li>p:only-child,.rst-content section ul li>p:only-child:last-child{margin-bottom:0}.rst-content .section ol li>ol,.rst-content .section ol li>ul,.rst-content .section ul li>ol,.rst-content .section ul li>ul,.rst-content .toctree-wrapper ol li>ol,.rst-content .toctree-wrapper ol li>ul,.rst-content .toctree-wrapper ul li>ol,.rst-content .toctree-wrapper ul li>ul,.rst-content section ol li>ol,.rst-content section ol li>ul,.rst-content section ul li>ol,.rst-content section ul li>ul{margin-bottom:12px}.rst-content .section ol.simple li>*,.rst-content .section ol.simple li ol,.rst-content .section ol.simple li ul,.rst-content .section ul.simple li>*,.rst-content .section ul.simple li ol,.rst-content .section ul.simple li ul,.rst-content .toctree-wrapper ol.simple li>*,.rst-content .toctree-wrapper ol.simple li ol,.rst-content .toctree-wrapper ol.simple li ul,.rst-content .toctree-wrapper ul.simple li>*,.rst-content .toctree-wrapper ul.simple li ol,.rst-content .toctree-wrapper ul.simple li ul,.rst-content section ol.simple li>*,.rst-content section ol.simple li ol,.rst-content section ol.simple li 
ul,.rst-content section ul.simple li>*,.rst-content section ul.simple li ol,.rst-content section ul.simple li ul{margin-top:0;margin-bottom:0}.rst-content .line-block{margin-left:0;margin-bottom:24px;line-height:24px}.rst-content .line-block .line-block{margin-left:24px;margin-bottom:0}.rst-content .topic-title{font-weight:700;margin-bottom:12px}.rst-content .toc-backref{color:#404040}.rst-content .align-right{float:right;margin:0 0 24px 24px}.rst-content .align-left{float:left;margin:0 24px 24px 0}.rst-content .align-center{margin:auto}.rst-content .align-center:not(table){display:block}.rst-content .code-block-caption .headerlink,.rst-content .eqno .headerlink,.rst-content .toctree-wrapper>p.caption .headerlink,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content p .headerlink,.rst-content table>caption .headerlink{opacity:0;font-size:14px;font-family:FontAwesome;margin-left:.5em}.rst-content .code-block-caption .headerlink:focus,.rst-content .code-block-caption:hover .headerlink,.rst-content .eqno .headerlink:focus,.rst-content .eqno:hover .headerlink,.rst-content .toctree-wrapper>p.caption .headerlink:focus,.rst-content .toctree-wrapper>p.caption:hover .headerlink,.rst-content dl dt .headerlink:focus,.rst-content dl dt:hover .headerlink,.rst-content h1 .headerlink:focus,.rst-content h1:hover .headerlink,.rst-content h2 .headerlink:focus,.rst-content h2:hover .headerlink,.rst-content h3 .headerlink:focus,.rst-content h3:hover .headerlink,.rst-content h4 .headerlink:focus,.rst-content h4:hover .headerlink,.rst-content h5 .headerlink:focus,.rst-content h5:hover .headerlink,.rst-content h6 .headerlink:focus,.rst-content h6:hover .headerlink,.rst-content p.caption .headerlink:focus,.rst-content p.caption:hover .headerlink,.rst-content p .headerlink:focus,.rst-content p:hover .headerlink,.rst-content table>caption .headerlink:focus,.rst-content table>caption:hover .headerlink{opacity:1}.rst-content p a{overflow-wrap:anywhere}.rst-content .wy-table td p,.rst-content .wy-table td ul,.rst-content .wy-table th p,.rst-content .wy-table th ul,.rst-content table.docutils td p,.rst-content table.docutils td ul,.rst-content table.docutils th p,.rst-content table.docutils th ul,.rst-content table.field-list td p,.rst-content table.field-list td ul,.rst-content table.field-list th p,.rst-content table.field-list th ul{font-size:inherit}.rst-content .btn:focus{outline:2px solid}.rst-content table>caption .headerlink:after{font-size:12px}.rst-content .centered{text-align:center}.rst-content .sidebar{float:right;width:40%;display:block;margin:0 0 24px 24px;padding:24px;background:#f3f6f6;border:1px solid #e1e4e5}.rst-content .sidebar dl,.rst-content .sidebar p,.rst-content .sidebar ul{font-size:90%}.rst-content .sidebar .last,.rst-content .sidebar>:last-child{margin-bottom:0}.rst-content .sidebar .sidebar-title{display:block;font-family:Roboto Slab,ff-tisa-web-pro,Georgia,Arial,sans-serif;font-weight:700;background:#e1e4e5;padding:6px 12px;margin:-24px -24px 24px;font-size:100%}.rst-content .highlighted{background:#f1c40f;box-shadow:0 0 0 2px #f1c40f;display:inline;font-weight:700}.rst-content .citation-reference,.rst-content .footnote-reference{vertical-align:baseline;position:relative;top:-.4em;line-height:0;font-size:90%}.rst-content .citation-reference>span.fn-bracket,.rst-content 
.footnote-reference>span.fn-bracket{display:none}.rst-content .hlist{width:100%}.rst-content dl dt span.classifier:before{content:" : "}.rst-content dl dt span.classifier-delimiter{display:none!important}html.writer-html4 .rst-content table.docutils.citation,html.writer-html4 .rst-content table.docutils.footnote{background:none;border:none}html.writer-html4 .rst-content table.docutils.citation td,html.writer-html4 .rst-content table.docutils.citation tr,html.writer-html4 .rst-content table.docutils.footnote td,html.writer-html4 .rst-content table.docutils.footnote tr{border:none;background-color:transparent!important;white-space:normal}html.writer-html4 .rst-content table.docutils.citation td.label,html.writer-html4 .rst-content table.docutils.footnote td.label{padding-left:0;padding-right:0;vertical-align:top}html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.field-list,html.writer-html5 .rst-content dl.footnote{display:grid;grid-template-columns:auto minmax(80%,95%)}html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dt{display:inline-grid;grid-template-columns:max-content auto}html.writer-html5 .rst-content aside.citation,html.writer-html5 .rst-content aside.footnote,html.writer-html5 .rst-content div.citation{display:grid;grid-template-columns:auto auto minmax(.65rem,auto) minmax(40%,95%)}html.writer-html5 .rst-content aside.citation>span.label,html.writer-html5 .rst-content aside.footnote>span.label,html.writer-html5 .rst-content div.citation>span.label{grid-column-start:1;grid-column-end:2}html.writer-html5 .rst-content aside.citation>span.backrefs,html.writer-html5 .rst-content aside.footnote>span.backrefs,html.writer-html5 .rst-content div.citation>span.backrefs{grid-column-start:2;grid-column-end:3;grid-row-start:1;grid-row-end:3}html.writer-html5 .rst-content aside.citation>p,html.writer-html5 .rst-content aside.footnote>p,html.writer-html5 .rst-content div.citation>p{grid-column-start:4;grid-column-end:5}html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.field-list,html.writer-html5 .rst-content dl.footnote{margin-bottom:24px}html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dt{padding-left:1rem}html.writer-html5 .rst-content dl.citation>dd,html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.field-list>dd,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dd,html.writer-html5 .rst-content dl.footnote>dt{margin-bottom:0}html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.footnote{font-size:.9rem}html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.footnote>dt{margin:0 .5rem .5rem 0;line-height:1.2rem;word-break:break-all;font-weight:400}html.writer-html5 .rst-content dl.citation>dt>span.brackets:before,html.writer-html5 .rst-content dl.footnote>dt>span.brackets:before{content:"["}html.writer-html5 .rst-content dl.citation>dt>span.brackets:after,html.writer-html5 .rst-content dl.footnote>dt>span.brackets:after{content:"]"}html.writer-html5 .rst-content dl.citation>dt>span.fn-backref,html.writer-html5 .rst-content dl.footnote>dt>span.fn-backref{text-align:left;font-style:italic;margin-left:.65rem;word-break:break-word;word-spacing:-.1rem;max-width:5rem}html.writer-html5 .rst-content dl.citation>dt>span.fn-backref>a,html.writer-html5 
.rst-content dl.footnote>dt>span.fn-backref>a{word-break:keep-all}html.writer-html5 .rst-content dl.citation>dt>span.fn-backref>a:not(:first-child):before,html.writer-html5 .rst-content dl.footnote>dt>span.fn-backref>a:not(:first-child):before{content:" "}html.writer-html5 .rst-content dl.citation>dd,html.writer-html5 .rst-content dl.footnote>dd{margin:0 0 .5rem;line-height:1.2rem}html.writer-html5 .rst-content dl.citation>dd p,html.writer-html5 .rst-content dl.footnote>dd p{font-size:.9rem}html.writer-html5 .rst-content aside.citation,html.writer-html5 .rst-content aside.footnote,html.writer-html5 .rst-content div.citation{padding-left:1rem;padding-right:1rem;font-size:.9rem;line-height:1.2rem}html.writer-html5 .rst-content aside.citation p,html.writer-html5 .rst-content aside.footnote p,html.writer-html5 .rst-content div.citation p{font-size:.9rem;line-height:1.2rem;margin-bottom:12px}html.writer-html5 .rst-content aside.citation span.backrefs,html.writer-html5 .rst-content aside.footnote span.backrefs,html.writer-html5 .rst-content div.citation span.backrefs{text-align:left;font-style:italic;margin-left:.65rem;word-break:break-word;word-spacing:-.1rem;max-width:5rem}html.writer-html5 .rst-content aside.citation span.backrefs>a,html.writer-html5 .rst-content aside.footnote span.backrefs>a,html.writer-html5 .rst-content div.citation span.backrefs>a{word-break:keep-all}html.writer-html5 .rst-content aside.citation span.backrefs>a:not(:first-child):before,html.writer-html5 .rst-content aside.footnote span.backrefs>a:not(:first-child):before,html.writer-html5 .rst-content div.citation span.backrefs>a:not(:first-child):before{content:" "}html.writer-html5 .rst-content aside.citation span.label,html.writer-html5 .rst-content aside.footnote span.label,html.writer-html5 .rst-content div.citation span.label{line-height:1.2rem}html.writer-html5 .rst-content aside.citation-list,html.writer-html5 .rst-content aside.footnote-list,html.writer-html5 .rst-content div.citation-list{margin-bottom:24px}html.writer-html5 .rst-content dl.option-list kbd{font-size:.9rem}.rst-content table.docutils.footnote,html.writer-html4 .rst-content table.docutils.citation,html.writer-html5 .rst-content aside.footnote,html.writer-html5 .rst-content aside.footnote-list aside.footnote,html.writer-html5 .rst-content div.citation-list>div.citation,html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.footnote{color:grey}.rst-content table.docutils.footnote code,.rst-content table.docutils.footnote tt,html.writer-html4 .rst-content table.docutils.citation code,html.writer-html4 .rst-content table.docutils.citation tt,html.writer-html5 .rst-content aside.footnote-list aside.footnote code,html.writer-html5 .rst-content aside.footnote-list aside.footnote tt,html.writer-html5 .rst-content aside.footnote code,html.writer-html5 .rst-content aside.footnote tt,html.writer-html5 .rst-content div.citation-list>div.citation code,html.writer-html5 .rst-content div.citation-list>div.citation tt,html.writer-html5 .rst-content dl.citation code,html.writer-html5 .rst-content dl.citation tt,html.writer-html5 .rst-content dl.footnote code,html.writer-html5 .rst-content dl.footnote tt{color:#555}.rst-content .wy-table-responsive.citation,.rst-content .wy-table-responsive.footnote{margin-bottom:0}.rst-content .wy-table-responsive.citation+:not(.citation),.rst-content .wy-table-responsive.footnote+:not(.footnote){margin-top:24px}.rst-content .wy-table-responsive.citation:last-child,.rst-content 
.wy-table-responsive.footnote:last-child{margin-bottom:24px}.rst-content table.docutils th{border-color:#e1e4e5}html.writer-html5 .rst-content table.docutils th{border:1px solid #e1e4e5}html.writer-html5 .rst-content table.docutils td>p,html.writer-html5 .rst-content table.docutils th>p{line-height:1rem;margin-bottom:0;font-size:.9rem}.rst-content table.docutils td .last,.rst-content table.docutils td .last>:last-child{margin-bottom:0}.rst-content table.field-list,.rst-content table.field-list td{border:none}.rst-content table.field-list td p{line-height:inherit}.rst-content table.field-list td>strong{display:inline-block}.rst-content table.field-list .field-name{padding-right:10px;text-align:left;white-space:nowrap}.rst-content table.field-list .field-body{text-align:left}.rst-content code,.rst-content tt{color:#000;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;padding:2px 5px}.rst-content code big,.rst-content code em,.rst-content tt big,.rst-content tt em{font-size:100%!important;line-height:normal}.rst-content code.literal,.rst-content tt.literal{color:#e74c3c;white-space:normal}.rst-content code.xref,.rst-content tt.xref,a .rst-content code,a .rst-content tt{font-weight:700;color:#404040;overflow-wrap:normal}.rst-content kbd,.rst-content pre,.rst-content samp{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace}.rst-content a code,.rst-content a tt{color:#2980b9}.rst-content dl{margin-bottom:24px}.rst-content dl dt{font-weight:700;margin-bottom:12px}.rst-content dl ol,.rst-content dl p,.rst-content dl table,.rst-content dl ul{margin-bottom:12px}.rst-content dl dd{margin:0 0 12px 24px;line-height:24px}.rst-content dl dd>ol:last-child,.rst-content dl dd>p:last-child,.rst-content dl dd>table:last-child,.rst-content dl dd>ul:last-child{margin-bottom:0}html.writer-html4 .rst-content dl:not(.docutils),html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple){margin-bottom:24px}html.writer-html4 .rst-content dl:not(.docutils)>dt,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt{display:table;margin:6px 0;font-size:90%;line-height:normal;background:#e7f2fa;color:#2980b9;border-top:3px solid #6ab0de;padding:6px;position:relative}html.writer-html4 .rst-content dl:not(.docutils)>dt:before,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt:before{color:#6ab0de}html.writer-html4 .rst-content dl:not(.docutils)>dt .headerlink,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt .headerlink{color:#404040;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt{margin-bottom:6px;border:none;border-left:3px solid #ccc;background:#f0f0f0;color:#555}html.writer-html4 .rst-content dl:not(.docutils) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt .headerlink,html.writer-html5 .rst-content 
dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt .headerlink{color:#404040;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils)>dt:first-child,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt:first-child{margin-top:0}html.writer-html4 .rst-content dl:not(.docutils) code.descclassname,html.writer-html4 .rst-content dl:not(.docutils) code.descname,html.writer-html4 .rst-content dl:not(.docutils) tt.descclassname,html.writer-html4 .rst-content dl:not(.docutils) tt.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) code.descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) code.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) tt.descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) tt.descname{background-color:transparent;border:none;padding:0;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils) code.descname,html.writer-html4 .rst-content dl:not(.docutils) tt.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) code.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) tt.descname{font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) .optional,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .optional{display:inline-block;padding:0 4px;color:#000;font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) .property,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .property{display:inline-block;padding-right:8px;max-width:100%}html.writer-html4 .rst-content dl:not(.docutils) .k,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .k{font-style:italic}html.writer-html4 .rst-content dl:not(.docutils) .descclassname,html.writer-html4 .rst-content dl:not(.docutils) .descname,html.writer-html4 .rst-content dl:not(.docutils) .sig-name,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .sig-name{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;color:#000}.rst-content .viewcode-back,.rst-content .viewcode-link{display:inline-block;color:#27ae60;font-size:80%;padding-left:24px}.rst-content .viewcode-back{display:block;float:right}.rst-content p.rubric{margin-bottom:12px;font-weight:700}.rst-content 
code.download,.rst-content tt.download{background:inherit;padding:inherit;font-weight:400;font-family:inherit;font-size:inherit;color:inherit;border:inherit;white-space:inherit}.rst-content code.download span:first-child,.rst-content tt.download span:first-child{-webkit-font-smoothing:subpixel-antialiased}.rst-content code.download span:first-child:before,.rst-content tt.download span:first-child:before{margin-right:4px}.rst-content .guilabel,.rst-content .menuselection{font-size:80%;font-weight:700;border-radius:4px;padding:2.4px 6px;margin:auto 2px}.rst-content .guilabel,.rst-content .menuselection{border:1px solid #7fbbe3;background:#e7f2fa}.rst-content :not(dl.option-list)>:not(dt):not(kbd):not(.kbd)>.kbd,.rst-content :not(dl.option-list)>:not(dt):not(kbd):not(.kbd)>kbd{color:inherit;font-size:80%;background-color:#fff;border:1px solid #a6a6a6;border-radius:4px;box-shadow:0 2px grey;padding:2.4px 6px;margin:auto 0}.rst-content .versionmodified{font-style:italic}@media screen and (max-width:480px){.rst-content .sidebar{width:100%}}span[id*=MathJax-Span]{color:#404040}.math{text-align:center}@font-face{font-family:Lato;src:url(fonts/lato-normal.woff2?bd03a2cc277bbbc338d464e679fe9942) format("woff2"),url(fonts/lato-normal.woff?27bd77b9162d388cb8d4c4217c7c5e2a) format("woff");font-weight:400;font-style:normal;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-bold.woff2?cccb897485813c7c256901dbca54ecf2) format("woff2"),url(fonts/lato-bold.woff?d878b6c29b10beca227e9eef4246111b) format("woff");font-weight:700;font-style:normal;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-bold-italic.woff2?0b6bb6725576b072c5d0b02ecdd1900d) format("woff2"),url(fonts/lato-bold-italic.woff?9c7e4e9eb485b4a121c760e61bc3707c) format("woff");font-weight:700;font-style:italic;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-normal-italic.woff2?4eb103b4d12be57cb1d040ed5e162e9d) format("woff2"),url(fonts/lato-normal-italic.woff?f28f2d6482446544ef1ea1ccc6dd5892) format("woff");font-weight:400;font-style:italic;font-display:block}@font-face{font-family:Roboto Slab;font-style:normal;font-weight:400;src:url(fonts/Roboto-Slab-Regular.woff2?7abf5b8d04d26a2cafea937019bca958) format("woff2"),url(fonts/Roboto-Slab-Regular.woff?c1be9284088d487c5e3ff0a10a92e58c) format("woff");font-display:block}@font-face{font-family:Roboto Slab;font-style:normal;font-weight:700;src:url(fonts/Roboto-Slab-Bold.woff2?9984f4a9bda09be08e83f2506954adbe) format("woff2"),url(fonts/Roboto-Slab-Bold.woff?bed5564a116b05148e3b3bea6fb1162a) format("woff");font-display:block} \ No newline at end of file diff --git a/sphinx/_build/html/_static/doctools.js b/sphinx/_build/html/_static/doctools.js new file mode 100644 index 000000000..d06a71d75 --- /dev/null +++ b/sphinx/_build/html/_static/doctools.js @@ -0,0 +1,156 @@ +/* + * doctools.js + * ~~~~~~~~~~~ + * + * Base JavaScript utilities for all Sphinx HTML documentation. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ +"use strict"; + +const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([ + "TEXTAREA", + "INPUT", + "SELECT", + "BUTTON", +]); + +const _ready = (callback) => { + if (document.readyState !== "loading") { + callback(); + } else { + document.addEventListener("DOMContentLoaded", callback); + } +}; + +/** + * Small JavaScript module for the documentation. 
+ */ +const Documentation = { + init: () => { + Documentation.initDomainIndexTable(); + Documentation.initOnKeyListeners(); + }, + + /** + * i18n support + */ + TRANSLATIONS: {}, + PLURAL_EXPR: (n) => (n === 1 ? 0 : 1), + LOCALE: "unknown", + + // gettext and ngettext don't access this so that the functions + // can safely bound to a different name (_ = Documentation.gettext) + gettext: (string) => { + const translated = Documentation.TRANSLATIONS[string]; + switch (typeof translated) { + case "undefined": + return string; // no translation + case "string": + return translated; // translation exists + default: + return translated[0]; // (singular, plural) translation tuple exists + } + }, + + ngettext: (singular, plural, n) => { + const translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated !== "undefined") + return translated[Documentation.PLURAL_EXPR(n)]; + return n === 1 ? singular : plural; + }, + + addTranslations: (catalog) => { + Object.assign(Documentation.TRANSLATIONS, catalog.messages); + Documentation.PLURAL_EXPR = new Function( + "n", + `return (${catalog.plural_expr})` + ); + Documentation.LOCALE = catalog.locale; + }, + + /** + * helper function to focus on search bar + */ + focusSearchBar: () => { + document.querySelectorAll("input[name=q]")[0]?.focus(); + }, + + /** + * Initialise the domain index toggle buttons + */ + initDomainIndexTable: () => { + const toggler = (el) => { + const idNumber = el.id.substr(7); + const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`); + if (el.src.substr(-9) === "minus.png") { + el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`; + toggledRows.forEach((el) => (el.style.display = "none")); + } else { + el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`; + toggledRows.forEach((el) => (el.style.display = "")); + } + }; + + const togglerElements = document.querySelectorAll("img.toggler"); + togglerElements.forEach((el) => + el.addEventListener("click", (event) => toggler(event.currentTarget)) + ); + togglerElements.forEach((el) => (el.style.display = "")); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler); + }, + + initOnKeyListeners: () => { + // only install a listener if it is really needed + if ( + !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS && + !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS + ) + return; + + document.addEventListener("keydown", (event) => { + // bail for input elements + if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; + // bail with special keys + if (event.altKey || event.ctrlKey || event.metaKey) return; + + if (!event.shiftKey) { + switch (event.key) { + case "ArrowLeft": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const prevLink = document.querySelector('link[rel="prev"]'); + if (prevLink && prevLink.href) { + window.location.href = prevLink.href; + event.preventDefault(); + } + break; + case "ArrowRight": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const nextLink = document.querySelector('link[rel="next"]'); + if (nextLink && nextLink.href) { + window.location.href = nextLink.href; + event.preventDefault(); + } + break; + } + } + + // some keyboard layouts may need Shift to get / + switch (event.key) { + case "/": + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; + Documentation.focusSearchBar(); + event.preventDefault(); + } + }); + }, +}; + +// quick alias for translations +const _ = Documentation.gettext; + +_ready(Documentation.init); diff --git 
a/sphinx/_build/html/_static/documentation_options.js b/sphinx/_build/html/_static/documentation_options.js new file mode 100644 index 000000000..1cf10965b --- /dev/null +++ b/sphinx/_build/html/_static/documentation_options.js @@ -0,0 +1,13 @@ +const DOCUMENTATION_OPTIONS = { + VERSION: '2024.04.23', + LANGUAGE: 'en', + COLLAPSE_INDEX: false, + BUILDER: 'html', + FILE_SUFFIX: '.html', + LINK_SUFFIX: '.html', + HAS_SOURCE: true, + SOURCELINK_SUFFIX: '.txt', + NAVIGATION_WITH_KEYS: false, + SHOW_SEARCH_SUMMARY: true, + ENABLE_SEARCH_SHORTCUTS: true, +}; \ No newline at end of file diff --git a/sphinx/_build/html/_static/file.png b/sphinx/_build/html/_static/file.png new file mode 100644 index 000000000..a858a410e Binary files /dev/null and b/sphinx/_build/html/_static/file.png differ diff --git a/sphinx/_build/html/_static/jquery.js b/sphinx/_build/html/_static/jquery.js new file mode 100644 index 000000000..c4c6022f2 --- /dev/null +++ b/sphinx/_build/html/_static/jquery.js @@ -0,0 +1,2 @@ +/*! jQuery v3.6.0 | (c) OpenJS Foundation and other contributors | jquery.org/license */ +!function(e,t){"use strict";"object"==typeof module&&"object"==typeof module.exports?module.exports=e.document?t(e,!0):function(e){if(!e.document)throw new Error("jQuery requires a window with a document");return t(e)}:t(e)}("undefined"!=typeof window?window:this,function(C,e){"use strict";var t=[],r=Object.getPrototypeOf,s=t.slice,g=t.flat?function(e){return t.flat.call(e)}:function(e){return t.concat.apply([],e)},u=t.push,i=t.indexOf,n={},o=n.toString,v=n.hasOwnProperty,a=v.toString,l=a.call(Object),y={},m=function(e){return"function"==typeof e&&"number"!=typeof e.nodeType&&"function"!=typeof e.item},x=function(e){return null!=e&&e===e.window},E=C.document,c={type:!0,src:!0,nonce:!0,noModule:!0};function b(e,t,n){var r,i,o=(n=n||E).createElement("script");if(o.text=e,t)for(r in c)(i=t[r]||t.getAttribute&&t.getAttribute(r))&&o.setAttribute(r,i);n.head.appendChild(o).parentNode.removeChild(o)}function w(e){return null==e?e+"":"object"==typeof e||"function"==typeof e?n[o.call(e)]||"object":typeof e}var f="3.6.0",S=function(e,t){return new S.fn.init(e,t)};function p(e){var t=!!e&&"length"in e&&e.length,n=w(e);return!m(e)&&!x(e)&&("array"===n||0===t||"number"==typeof t&&0+~]|"+M+")"+M+"*"),U=new RegExp(M+"|>"),X=new RegExp(F),V=new RegExp("^"+I+"$"),G={ID:new RegExp("^#("+I+")"),CLASS:new RegExp("^\\.("+I+")"),TAG:new RegExp("^("+I+"|[*])"),ATTR:new RegExp("^"+W),PSEUDO:new RegExp("^"+F),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+M+"*(even|odd|(([+-]|)(\\d*)n|)"+M+"*(?:([+-]|)"+M+"*(\\d+)|))"+M+"*\\)|)","i"),bool:new RegExp("^(?:"+R+")$","i"),needsContext:new RegExp("^"+M+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+M+"*((?:-\\d)?\\d*)"+M+"*\\)|)(?=[^-]|$)","i")},Y=/HTML$/i,Q=/^(?:input|select|textarea|button)$/i,J=/^h\d$/i,K=/^[^{]+\{\s*\[native \w/,Z=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,ee=/[+~]/,te=new RegExp("\\\\[\\da-fA-F]{1,6}"+M+"?|\\\\([^\\r\\n\\f])","g"),ne=function(e,t){var n="0x"+e.slice(1)-65536;return t||(n<0?String.fromCharCode(n+65536):String.fromCharCode(n>>10|55296,1023&n|56320))},re=/([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,ie=function(e,t){return t?"\0"===e?"\ufffd":e.slice(0,-1)+"\\"+e.charCodeAt(e.length-1).toString(16)+" 
":"\\"+e},oe=function(){T()},ae=be(function(e){return!0===e.disabled&&"fieldset"===e.nodeName.toLowerCase()},{dir:"parentNode",next:"legend"});try{H.apply(t=O.call(p.childNodes),p.childNodes),t[p.childNodes.length].nodeType}catch(e){H={apply:t.length?function(e,t){L.apply(e,O.call(t))}:function(e,t){var n=e.length,r=0;while(e[n++]=t[r++]);e.length=n-1}}}function se(t,e,n,r){var i,o,a,s,u,l,c,f=e&&e.ownerDocument,p=e?e.nodeType:9;if(n=n||[],"string"!=typeof t||!t||1!==p&&9!==p&&11!==p)return n;if(!r&&(T(e),e=e||C,E)){if(11!==p&&(u=Z.exec(t)))if(i=u[1]){if(9===p){if(!(a=e.getElementById(i)))return n;if(a.id===i)return n.push(a),n}else if(f&&(a=f.getElementById(i))&&y(e,a)&&a.id===i)return n.push(a),n}else{if(u[2])return H.apply(n,e.getElementsByTagName(t)),n;if((i=u[3])&&d.getElementsByClassName&&e.getElementsByClassName)return H.apply(n,e.getElementsByClassName(i)),n}if(d.qsa&&!N[t+" "]&&(!v||!v.test(t))&&(1!==p||"object"!==e.nodeName.toLowerCase())){if(c=t,f=e,1===p&&(U.test(t)||z.test(t))){(f=ee.test(t)&&ye(e.parentNode)||e)===e&&d.scope||((s=e.getAttribute("id"))?s=s.replace(re,ie):e.setAttribute("id",s=S)),o=(l=h(t)).length;while(o--)l[o]=(s?"#"+s:":scope")+" "+xe(l[o]);c=l.join(",")}try{return H.apply(n,f.querySelectorAll(c)),n}catch(e){N(t,!0)}finally{s===S&&e.removeAttribute("id")}}}return g(t.replace($,"$1"),e,n,r)}function ue(){var r=[];return function e(t,n){return r.push(t+" ")>b.cacheLength&&delete e[r.shift()],e[t+" "]=n}}function le(e){return e[S]=!0,e}function ce(e){var t=C.createElement("fieldset");try{return!!e(t)}catch(e){return!1}finally{t.parentNode&&t.parentNode.removeChild(t),t=null}}function fe(e,t){var n=e.split("|"),r=n.length;while(r--)b.attrHandle[n[r]]=t}function pe(e,t){var n=t&&e,r=n&&1===e.nodeType&&1===t.nodeType&&e.sourceIndex-t.sourceIndex;if(r)return r;if(n)while(n=n.nextSibling)if(n===t)return-1;return e?1:-1}function de(t){return function(e){return"input"===e.nodeName.toLowerCase()&&e.type===t}}function he(n){return function(e){var t=e.nodeName.toLowerCase();return("input"===t||"button"===t)&&e.type===n}}function ge(t){return function(e){return"form"in e?e.parentNode&&!1===e.disabled?"label"in e?"label"in e.parentNode?e.parentNode.disabled===t:e.disabled===t:e.isDisabled===t||e.isDisabled!==!t&&ae(e)===t:e.disabled===t:"label"in e&&e.disabled===t}}function ve(a){return le(function(o){return o=+o,le(function(e,t){var n,r=a([],e.length,o),i=r.length;while(i--)e[n=r[i]]&&(e[n]=!(t[n]=e[n]))})})}function ye(e){return e&&"undefined"!=typeof e.getElementsByTagName&&e}for(e in d=se.support={},i=se.isXML=function(e){var t=e&&e.namespaceURI,n=e&&(e.ownerDocument||e).documentElement;return!Y.test(t||n&&n.nodeName||"HTML")},T=se.setDocument=function(e){var t,n,r=e?e.ownerDocument||e:p;return r!=C&&9===r.nodeType&&r.documentElement&&(a=(C=r).documentElement,E=!i(C),p!=C&&(n=C.defaultView)&&n.top!==n&&(n.addEventListener?n.addEventListener("unload",oe,!1):n.attachEvent&&n.attachEvent("onunload",oe)),d.scope=ce(function(e){return a.appendChild(e).appendChild(C.createElement("div")),"undefined"!=typeof e.querySelectorAll&&!e.querySelectorAll(":scope fieldset div").length}),d.attributes=ce(function(e){return e.className="i",!e.getAttribute("className")}),d.getElementsByTagName=ce(function(e){return e.appendChild(C.createComment("")),!e.getElementsByTagName("*").length}),d.getElementsByClassName=K.test(C.getElementsByClassName),d.getById=ce(function(e){return 
a.appendChild(e).id=S,!C.getElementsByName||!C.getElementsByName(S).length}),d.getById?(b.filter.ID=function(e){var t=e.replace(te,ne);return function(e){return e.getAttribute("id")===t}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n=t.getElementById(e);return n?[n]:[]}}):(b.filter.ID=function(e){var n=e.replace(te,ne);return function(e){var t="undefined"!=typeof e.getAttributeNode&&e.getAttributeNode("id");return t&&t.value===n}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n,r,i,o=t.getElementById(e);if(o){if((n=o.getAttributeNode("id"))&&n.value===e)return[o];i=t.getElementsByName(e),r=0;while(o=i[r++])if((n=o.getAttributeNode("id"))&&n.value===e)return[o]}return[]}}),b.find.TAG=d.getElementsByTagName?function(e,t){return"undefined"!=typeof t.getElementsByTagName?t.getElementsByTagName(e):d.qsa?t.querySelectorAll(e):void 0}:function(e,t){var n,r=[],i=0,o=t.getElementsByTagName(e);if("*"===e){while(n=o[i++])1===n.nodeType&&r.push(n);return r}return o},b.find.CLASS=d.getElementsByClassName&&function(e,t){if("undefined"!=typeof t.getElementsByClassName&&E)return t.getElementsByClassName(e)},s=[],v=[],(d.qsa=K.test(C.querySelectorAll))&&(ce(function(e){var t;a.appendChild(e).innerHTML="",e.querySelectorAll("[msallowcapture^='']").length&&v.push("[*^$]="+M+"*(?:''|\"\")"),e.querySelectorAll("[selected]").length||v.push("\\["+M+"*(?:value|"+R+")"),e.querySelectorAll("[id~="+S+"-]").length||v.push("~="),(t=C.createElement("input")).setAttribute("name",""),e.appendChild(t),e.querySelectorAll("[name='']").length||v.push("\\["+M+"*name"+M+"*="+M+"*(?:''|\"\")"),e.querySelectorAll(":checked").length||v.push(":checked"),e.querySelectorAll("a#"+S+"+*").length||v.push(".#.+[+~]"),e.querySelectorAll("\\\f"),v.push("[\\r\\n\\f]")}),ce(function(e){e.innerHTML="";var t=C.createElement("input");t.setAttribute("type","hidden"),e.appendChild(t).setAttribute("name","D"),e.querySelectorAll("[name=d]").length&&v.push("name"+M+"*[*^$|!~]?="),2!==e.querySelectorAll(":enabled").length&&v.push(":enabled",":disabled"),a.appendChild(e).disabled=!0,2!==e.querySelectorAll(":disabled").length&&v.push(":enabled",":disabled"),e.querySelectorAll("*,:x"),v.push(",.*:")})),(d.matchesSelector=K.test(c=a.matches||a.webkitMatchesSelector||a.mozMatchesSelector||a.oMatchesSelector||a.msMatchesSelector))&&ce(function(e){d.disconnectedMatch=c.call(e,"*"),c.call(e,"[s!='']:x"),s.push("!=",F)}),v=v.length&&new RegExp(v.join("|")),s=s.length&&new RegExp(s.join("|")),t=K.test(a.compareDocumentPosition),y=t||K.test(a.contains)?function(e,t){var n=9===e.nodeType?e.documentElement:e,r=t&&t.parentNode;return e===r||!(!r||1!==r.nodeType||!(n.contains?n.contains(r):e.compareDocumentPosition&&16&e.compareDocumentPosition(r)))}:function(e,t){if(t)while(t=t.parentNode)if(t===e)return!0;return!1},j=t?function(e,t){if(e===t)return l=!0,0;var n=!e.compareDocumentPosition-!t.compareDocumentPosition;return n||(1&(n=(e.ownerDocument||e)==(t.ownerDocument||t)?e.compareDocumentPosition(t):1)||!d.sortDetached&&t.compareDocumentPosition(e)===n?e==C||e.ownerDocument==p&&y(p,e)?-1:t==C||t.ownerDocument==p&&y(p,t)?1:u?P(u,e)-P(u,t):0:4&n?-1:1)}:function(e,t){if(e===t)return l=!0,0;var n,r=0,i=e.parentNode,o=t.parentNode,a=[e],s=[t];if(!i||!o)return e==C?-1:t==C?1:i?-1:o?1:u?P(u,e)-P(u,t):0;if(i===o)return pe(e,t);n=e;while(n=n.parentNode)a.unshift(n);n=t;while(n=n.parentNode)s.unshift(n);while(a[r]===s[r])r++;return r?pe(a[r],s[r]):a[r]==p?-1:s[r]==p?1:0}),C},se.matches=function(e,t){return 
se(e,null,null,t)},se.matchesSelector=function(e,t){if(T(e),d.matchesSelector&&E&&!N[t+" "]&&(!s||!s.test(t))&&(!v||!v.test(t)))try{var n=c.call(e,t);if(n||d.disconnectedMatch||e.document&&11!==e.document.nodeType)return n}catch(e){N(t,!0)}return 0":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(te,ne),e[3]=(e[3]||e[4]||e[5]||"").replace(te,ne),"~="===e[2]&&(e[3]=" "+e[3]+" "),e.slice(0,4)},CHILD:function(e){return e[1]=e[1].toLowerCase(),"nth"===e[1].slice(0,3)?(e[3]||se.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*("even"===e[3]||"odd"===e[3])),e[5]=+(e[7]+e[8]||"odd"===e[3])):e[3]&&se.error(e[0]),e},PSEUDO:function(e){var t,n=!e[6]&&e[2];return G.CHILD.test(e[0])?null:(e[3]?e[2]=e[4]||e[5]||"":n&&X.test(n)&&(t=h(n,!0))&&(t=n.indexOf(")",n.length-t)-n.length)&&(e[0]=e[0].slice(0,t),e[2]=n.slice(0,t)),e.slice(0,3))}},filter:{TAG:function(e){var t=e.replace(te,ne).toLowerCase();return"*"===e?function(){return!0}:function(e){return e.nodeName&&e.nodeName.toLowerCase()===t}},CLASS:function(e){var t=m[e+" "];return t||(t=new RegExp("(^|"+M+")"+e+"("+M+"|$)"))&&m(e,function(e){return t.test("string"==typeof e.className&&e.className||"undefined"!=typeof e.getAttribute&&e.getAttribute("class")||"")})},ATTR:function(n,r,i){return function(e){var t=se.attr(e,n);return null==t?"!="===r:!r||(t+="","="===r?t===i:"!="===r?t!==i:"^="===r?i&&0===t.indexOf(i):"*="===r?i&&-1:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i;function j(e,n,r){return m(n)?S.grep(e,function(e,t){return!!n.call(e,t,e)!==r}):n.nodeType?S.grep(e,function(e){return e===n!==r}):"string"!=typeof n?S.grep(e,function(e){return-1)[^>]*|#([\w-]+))$/;(S.fn.init=function(e,t,n){var r,i;if(!e)return this;if(n=n||D,"string"==typeof e){if(!(r="<"===e[0]&&">"===e[e.length-1]&&3<=e.length?[null,e,null]:q.exec(e))||!r[1]&&t)return!t||t.jquery?(t||n).find(e):this.constructor(t).find(e);if(r[1]){if(t=t instanceof S?t[0]:t,S.merge(this,S.parseHTML(r[1],t&&t.nodeType?t.ownerDocument||t:E,!0)),N.test(r[1])&&S.isPlainObject(t))for(r in t)m(this[r])?this[r](t[r]):this.attr(r,t[r]);return this}return(i=E.getElementById(r[2]))&&(this[0]=i,this.length=1),this}return e.nodeType?(this[0]=e,this.length=1,this):m(e)?void 0!==n.ready?n.ready(e):e(S):S.makeArray(e,this)}).prototype=S.fn,D=S(E);var L=/^(?:parents|prev(?:Until|All))/,H={children:!0,contents:!0,next:!0,prev:!0};function O(e,t){while((e=e[t])&&1!==e.nodeType);return e}S.fn.extend({has:function(e){var t=S(e,this),n=t.length;return this.filter(function(){for(var e=0;e\x20\t\r\n\f]*)/i,he=/^$|^module$|\/(?:java|ecma)script/i;ce=E.createDocumentFragment().appendChild(E.createElement("div")),(fe=E.createElement("input")).setAttribute("type","radio"),fe.setAttribute("checked","checked"),fe.setAttribute("name","t"),ce.appendChild(fe),y.checkClone=ce.cloneNode(!0).cloneNode(!0).lastChild.checked,ce.innerHTML="",y.noCloneChecked=!!ce.cloneNode(!0).lastChild.defaultValue,ce.innerHTML="",y.option=!!ce.lastChild;var ge={thead:[1,"","
"],col:[2,"","
"],tr:[2,"","
"],td:[3,"","
"],_default:[0,"",""]};function ve(e,t){var n;return n="undefined"!=typeof e.getElementsByTagName?e.getElementsByTagName(t||"*"):"undefined"!=typeof e.querySelectorAll?e.querySelectorAll(t||"*"):[],void 0===t||t&&A(e,t)?S.merge([e],n):n}function ye(e,t){for(var n=0,r=e.length;n",""]);var me=/<|&#?\w+;/;function xe(e,t,n,r,i){for(var o,a,s,u,l,c,f=t.createDocumentFragment(),p=[],d=0,h=e.length;d\s*$/g;function je(e,t){return A(e,"table")&&A(11!==t.nodeType?t:t.firstChild,"tr")&&S(e).children("tbody")[0]||e}function De(e){return e.type=(null!==e.getAttribute("type"))+"/"+e.type,e}function qe(e){return"true/"===(e.type||"").slice(0,5)?e.type=e.type.slice(5):e.removeAttribute("type"),e}function Le(e,t){var n,r,i,o,a,s;if(1===t.nodeType){if(Y.hasData(e)&&(s=Y.get(e).events))for(i in Y.remove(t,"handle events"),s)for(n=0,r=s[i].length;n").attr(n.scriptAttrs||{}).prop({charset:n.scriptCharset,src:n.url}).on("load error",i=function(e){r.remove(),i=null,e&&t("error"===e.type?404:200,e.type)}),E.head.appendChild(r[0])},abort:function(){i&&i()}}});var _t,zt=[],Ut=/(=)\?(?=&|$)|\?\?/;S.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var e=zt.pop()||S.expando+"_"+wt.guid++;return this[e]=!0,e}}),S.ajaxPrefilter("json jsonp",function(e,t,n){var r,i,o,a=!1!==e.jsonp&&(Ut.test(e.url)?"url":"string"==typeof e.data&&0===(e.contentType||"").indexOf("application/x-www-form-urlencoded")&&Ut.test(e.data)&&"data");if(a||"jsonp"===e.dataTypes[0])return r=e.jsonpCallback=m(e.jsonpCallback)?e.jsonpCallback():e.jsonpCallback,a?e[a]=e[a].replace(Ut,"$1"+r):!1!==e.jsonp&&(e.url+=(Tt.test(e.url)?"&":"?")+e.jsonp+"="+r),e.converters["script json"]=function(){return o||S.error(r+" was not called"),o[0]},e.dataTypes[0]="json",i=C[r],C[r]=function(){o=arguments},n.always(function(){void 0===i?S(C).removeProp(r):C[r]=i,e[r]&&(e.jsonpCallback=t.jsonpCallback,zt.push(r)),o&&m(i)&&i(o[0]),o=i=void 0}),"script"}),y.createHTMLDocument=((_t=E.implementation.createHTMLDocument("").body).innerHTML="
",2===_t.childNodes.length),S.parseHTML=function(e,t,n){return"string"!=typeof e?[]:("boolean"==typeof t&&(n=t,t=!1),t||(y.createHTMLDocument?((r=(t=E.implementation.createHTMLDocument("")).createElement("base")).href=E.location.href,t.head.appendChild(r)):t=E),o=!n&&[],(i=N.exec(e))?[t.createElement(i[1])]:(i=xe([e],t,o),o&&o.length&&S(o).remove(),S.merge([],i.childNodes)));var r,i,o},S.fn.load=function(e,t,n){var r,i,o,a=this,s=e.indexOf(" ");return-1").append(S.parseHTML(e)).find(r):e)}).always(n&&function(e,t){a.each(function(){n.apply(this,o||[e.responseText,t,e])})}),this},S.expr.pseudos.animated=function(t){return S.grep(S.timers,function(e){return t===e.elem}).length},S.offset={setOffset:function(e,t,n){var r,i,o,a,s,u,l=S.css(e,"position"),c=S(e),f={};"static"===l&&(e.style.position="relative"),s=c.offset(),o=S.css(e,"top"),u=S.css(e,"left"),("absolute"===l||"fixed"===l)&&-1<(o+u).indexOf("auto")?(a=(r=c.position()).top,i=r.left):(a=parseFloat(o)||0,i=parseFloat(u)||0),m(t)&&(t=t.call(e,n,S.extend({},s))),null!=t.top&&(f.top=t.top-s.top+a),null!=t.left&&(f.left=t.left-s.left+i),"using"in t?t.using.call(e,f):c.css(f)}},S.fn.extend({offset:function(t){if(arguments.length)return void 0===t?this:this.each(function(e){S.offset.setOffset(this,t,e)});var e,n,r=this[0];return r?r.getClientRects().length?(e=r.getBoundingClientRect(),n=r.ownerDocument.defaultView,{top:e.top+n.pageYOffset,left:e.left+n.pageXOffset}):{top:0,left:0}:void 0},position:function(){if(this[0]){var e,t,n,r=this[0],i={top:0,left:0};if("fixed"===S.css(r,"position"))t=r.getBoundingClientRect();else{t=this.offset(),n=r.ownerDocument,e=r.offsetParent||n.documentElement;while(e&&(e===n.body||e===n.documentElement)&&"static"===S.css(e,"position"))e=e.parentNode;e&&e!==r&&1===e.nodeType&&((i=S(e).offset()).top+=S.css(e,"borderTopWidth",!0),i.left+=S.css(e,"borderLeftWidth",!0))}return{top:t.top-i.top-S.css(r,"marginTop",!0),left:t.left-i.left-S.css(r,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var e=this.offsetParent;while(e&&"static"===S.css(e,"position"))e=e.offsetParent;return e||re})}}),S.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(t,i){var o="pageYOffset"===i;S.fn[t]=function(e){return $(this,function(e,t,n){var r;if(x(e)?r=e:9===e.nodeType&&(r=e.defaultView),void 0===n)return r?r[i]:e[t];r?r.scrollTo(o?r.pageXOffset:n,o?n:r.pageYOffset):e[t]=n},t,e,arguments.length)}}),S.each(["top","left"],function(e,n){S.cssHooks[n]=Fe(y.pixelPosition,function(e,t){if(t)return t=We(e,n),Pe.test(t)?S(e).position()[n]+"px":t})}),S.each({Height:"height",Width:"width"},function(a,s){S.each({padding:"inner"+a,content:s,"":"outer"+a},function(r,o){S.fn[o]=function(e,t){var n=arguments.length&&(r||"boolean"!=typeof e),i=r||(!0===e||!0===t?"margin":"border");return $(this,function(e,t,n){var r;return x(e)?0===o.indexOf("outer")?e["inner"+a]:e.document.documentElement["client"+a]:9===e.nodeType?(r=e.documentElement,Math.max(e.body["scroll"+a],r["scroll"+a],e.body["offset"+a],r["offset"+a],r["client"+a])):void 0===n?S.css(e,t,i):S.style(e,t,n,i)},s,n?e:void 0,n)}})}),S.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(e,t){S.fn[t]=function(e){return this.on(t,e)}}),S.fn.extend({bind:function(e,t,n){return this.on(e,null,t,n)},unbind:function(e,t){return this.off(e,null,t)},delegate:function(e,t,n,r){return this.on(t,e,n,r)},undelegate:function(e,t,n){return 1===arguments.length?this.off(e,"**"):this.off(t,e||"**",n)},hover:function(e,t){return 
this.mouseenter(e).mouseleave(t||e)}}),S.each("blur focus focusin focusout resize scroll click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup contextmenu".split(" "),function(e,n){S.fn[n]=function(e,t){return 0",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=y.elements;return"string"==typeof a?a.split(" "):a}function e(a,b){var c=y.elements;"string"!=typeof c&&(c=c.join(" ")),"string"!=typeof a&&(a=a.join(" ")),y.elements=c+" "+a,j(b)}function f(a){var b=x[a[v]];return b||(b={},w++,a[v]=w,x[w]=b),b}function g(a,c,d){if(c||(c=b),q)return c.createElement(a);d||(d=f(c));var e;return e=d.cache[a]?d.cache[a].cloneNode():u.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!e.canHaveChildren||t.test(a)||e.tagUrn?e:d.frag.appendChild(e)}function h(a,c){if(a||(a=b),q)return a.createDocumentFragment();c=c||f(a);for(var e=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)e.createElement(h[g]);return e}function i(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return y.shivMethods?g(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-:]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(y,b.frag)}function j(a){a||(a=b);var d=f(a);return!y.shivCSS||p||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),q||i(a,d),a}function k(a){for(var b,c=a.getElementsByTagName("*"),e=c.length,f=RegExp("^(?:"+d().join("|")+")$","i"),g=[];e--;)b=c[e],f.test(b.nodeName)&&g.push(b.applyElement(l(b)));return g}function l(a){for(var b,c=a.attributes,d=c.length,e=a.ownerDocument.createElement(A+":"+a.nodeName);d--;)b=c[d],b.specified&&e.setAttribute(b.nodeName,b.nodeValue);return e.style.cssText=a.style.cssText,e}function m(a){for(var b,c=a.split("{"),e=c.length,f=RegExp("(^|[\\s,>+~])("+d().join("|")+")(?=[[\\s,>+~#.:]|$)","gi"),g="$1"+A+"\\:$2";e--;)b=c[e]=c[e].split("}"),b[b.length-1]=b[b.length-1].replace(f,g),c[e]=b.join("}");return c.join("{")}function n(a){for(var b=a.length;b--;)a[b].removeNode()}function o(a){function b(){clearTimeout(g._removeSheetTimer),d&&d.removeNode(!0),d=null}var d,e,g=f(a),h=a.namespaces,i=a.parentWindow;return!B||a.printShived?a:("undefined"==typeof h[A]&&h.add(A),i.attachEvent("onbeforeprint",function(){b();for(var f,g,h,i=a.styleSheets,j=[],l=i.length,n=Array(l);l--;)n[l]=i[l];for(;h=n.pop();)if(!h.disabled&&z.test(h.media)){try{f=h.imports,g=f.length}catch(o){g=0}for(l=0;g>l;l++)n.push(f[l]);try{j.push(h.cssText)}catch(o){}}j=m(j.reverse().join("")),e=k(a),d=c(a,j)}),i.attachEvent("onafterprint",function(){n(e),clearTimeout(g._removeSheetTimer),g._removeSheetTimer=setTimeout(b,500)}),a.printShived=!0,a)}var p,q,r="3.7.3",s=a.html5||{},t=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,u=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,v="_html5shiv",w=0,x={};!function(){try{var a=b.createElement("a");a.innerHTML="",p="hidden"in a,q=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof 
a.createElement}()}catch(c){p=!0,q=!0}}();var y={elements:s.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure footer header hgroup main mark meter nav output picture progress section summary template time video",version:r,shivCSS:s.shivCSS!==!1,supportsUnknownElements:q,shivMethods:s.shivMethods!==!1,type:"default",shivDocument:j,createElement:g,createDocumentFragment:h,addElements:e};a.html5=y,j(b);var z=/^$|\b(?:all|print)\b/,A="html5shiv",B=!q&&function(){var c=b.documentElement;return!("undefined"==typeof b.namespaces||"undefined"==typeof b.parentWindow||"undefined"==typeof c.applyElement||"undefined"==typeof c.removeNode||"undefined"==typeof a.attachEvent)}();y.type+=" print",y.shivPrint=o,o(b),"object"==typeof module&&module.exports&&(module.exports=y)}("undefined"!=typeof window?window:this,document); \ No newline at end of file diff --git a/sphinx/_build/html/_static/js/html5shiv.min.js b/sphinx/_build/html/_static/js/html5shiv.min.js new file mode 100644 index 000000000..cd1c674f5 --- /dev/null +++ b/sphinx/_build/html/_static/js/html5shiv.min.js @@ -0,0 +1,4 @@ +/** +* @preserve HTML5 Shiv 3.7.3 | @afarkas @jdalton @jon_neal @rem | MIT/GPL2 Licensed +*/ +!function(a,b){function c(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return c.innerHTML="x",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=t.elements;return"string"==typeof a?a.split(" "):a}function e(a,b){var c=t.elements;"string"!=typeof c&&(c=c.join(" ")),"string"!=typeof a&&(a=a.join(" ")),t.elements=c+" "+a,j(b)}function f(a){var b=s[a[q]];return b||(b={},r++,a[q]=r,s[r]=b),b}function g(a,c,d){if(c||(c=b),l)return c.createElement(a);d||(d=f(c));var e;return e=d.cache[a]?d.cache[a].cloneNode():p.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!e.canHaveChildren||o.test(a)||e.tagUrn?e:d.frag.appendChild(e)}function h(a,c){if(a||(a=b),l)return a.createDocumentFragment();c=c||f(a);for(var e=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)e.createElement(h[g]);return e}function i(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return t.shivMethods?g(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-:]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(t,b.frag)}function j(a){a||(a=b);var d=f(a);return!t.shivCSS||k||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),l||i(a,d),a}var k,l,m="3.7.3-pre",n=a.html5||{},o=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,p=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,q="_html5shiv",r=0,s={};!function(){try{var a=b.createElement("a");a.innerHTML="",k="hidden"in a,l=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof a.createElement}()}catch(c){k=!0,l=!0}}();var t={elements:n.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure footer header hgroup main mark meter nav output picture progress section summary template time 
video",version:m,shivCSS:n.shivCSS!==!1,supportsUnknownElements:l,shivMethods:n.shivMethods!==!1,type:"default",shivDocument:j,createElement:g,createDocumentFragment:h,addElements:e};a.html5=t,j(b),"object"==typeof module&&module.exports&&(module.exports=t)}("undefined"!=typeof window?window:this,document); \ No newline at end of file diff --git a/sphinx/_build/html/_static/js/theme.js b/sphinx/_build/html/_static/js/theme.js new file mode 100644 index 000000000..1fddb6ee4 --- /dev/null +++ b/sphinx/_build/html/_static/js/theme.js @@ -0,0 +1 @@ +!function(n){var e={};function t(i){if(e[i])return e[i].exports;var o=e[i]={i:i,l:!1,exports:{}};return n[i].call(o.exports,o,o.exports,t),o.l=!0,o.exports}t.m=n,t.c=e,t.d=function(n,e,i){t.o(n,e)||Object.defineProperty(n,e,{enumerable:!0,get:i})},t.r=function(n){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(n,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(n,"__esModule",{value:!0})},t.t=function(n,e){if(1&e&&(n=t(n)),8&e)return n;if(4&e&&"object"==typeof n&&n&&n.__esModule)return n;var i=Object.create(null);if(t.r(i),Object.defineProperty(i,"default",{enumerable:!0,value:n}),2&e&&"string"!=typeof n)for(var o in n)t.d(i,o,function(e){return n[e]}.bind(null,o));return i},t.n=function(n){var e=n&&n.__esModule?function(){return n.default}:function(){return n};return t.d(e,"a",e),e},t.o=function(n,e){return Object.prototype.hasOwnProperty.call(n,e)},t.p="",t(t.s=0)}([function(n,e,t){t(1),n.exports=t(3)},function(n,e,t){(function(){var e="undefined"!=typeof window?window.jQuery:t(2);n.exports.ThemeNav={navBar:null,win:null,winScroll:!1,winResize:!1,linkScroll:!1,winPosition:0,winHeight:null,docHeight:null,isRunning:!1,enable:function(n){var t=this;void 0===n&&(n=!0),t.isRunning||(t.isRunning=!0,e((function(e){t.init(e),t.reset(),t.win.on("hashchange",t.reset),n&&t.win.on("scroll",(function(){t.linkScroll||t.winScroll||(t.winScroll=!0,requestAnimationFrame((function(){t.onScroll()})))})),t.win.on("resize",(function(){t.winResize||(t.winResize=!0,requestAnimationFrame((function(){t.onResize()})))})),t.onResize()})))},enableSticky:function(){this.enable(!0)},init:function(n){n(document);var e=this;this.navBar=n("div.wy-side-scroll:first"),this.win=n(window),n(document).on("click","[data-toggle='wy-nav-top']",(function(){n("[data-toggle='wy-nav-shift']").toggleClass("shift"),n("[data-toggle='rst-versions']").toggleClass("shift")})).on("click",".wy-menu-vertical .current ul li a",(function(){var t=n(this);n("[data-toggle='wy-nav-shift']").removeClass("shift"),n("[data-toggle='rst-versions']").toggleClass("shift"),e.toggleCurrent(t),e.hashChange()})).on("click","[data-toggle='rst-current-version']",(function(){n("[data-toggle='rst-versions']").toggleClass("shift-up")})),n("table.docutils:not(.field-list,.footnote,.citation)").wrap("
"),n("table.docutils.footnote").wrap("
"),n("table.docutils.citation").wrap("
"),n(".wy-menu-vertical ul").not(".simple").siblings("a").each((function(){var t=n(this);expand=n(''),expand.on("click",(function(n){return e.toggleCurrent(t),n.stopPropagation(),!1})),t.prepend(expand)}))},reset:function(){var n=encodeURI(window.location.hash)||"#";try{var e=$(".wy-menu-vertical"),t=e.find('[href="'+n+'"]');if(0===t.length){var i=$('.document [id="'+n.substring(1)+'"]').closest("div.section");0===(t=e.find('[href="#'+i.attr("id")+'"]')).length&&(t=e.find('[href="#"]'))}if(t.length>0){$(".wy-menu-vertical .current").removeClass("current").attr("aria-expanded","false"),t.addClass("current").attr("aria-expanded","true"),t.closest("li.toctree-l1").parent().addClass("current").attr("aria-expanded","true");for(let n=1;n<=10;n++)t.closest("li.toctree-l"+n).addClass("current").attr("aria-expanded","true");t[0].scrollIntoView()}}catch(n){console.log("Error expanding nav for anchor",n)}},onScroll:function(){this.winScroll=!1;var n=this.win.scrollTop(),e=n+this.winHeight,t=this.navBar.scrollTop()+(n-this.winPosition);n<0||e>this.docHeight||(this.navBar.scrollTop(t),this.winPosition=n)},onResize:function(){this.winResize=!1,this.winHeight=this.win.height(),this.docHeight=$(document).height()},hashChange:function(){this.linkScroll=!0,this.win.one("hashchange",(function(){this.linkScroll=!1}))},toggleCurrent:function(n){var e=n.closest("li");e.siblings("li.current").removeClass("current").attr("aria-expanded","false"),e.siblings().find("li.current").removeClass("current").attr("aria-expanded","false");var t=e.find("> ul li");t.length&&(t.removeClass("current").attr("aria-expanded","false"),e.toggleClass("current").attr("aria-expanded",(function(n,e){return"true"==e?"false":"true"})))}},"undefined"!=typeof window&&(window.SphinxRtdTheme={Navigation:n.exports.ThemeNav,StickyNav:n.exports.ThemeNav}),function(){for(var n=0,e=["ms","moz","webkit","o"],t=0;t0 + var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1 + var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1 + var s_v = "^(" + C + ")?" 
+ v; // vowel in stem + + this.stemWord = function (w) { + var stem; + var suffix; + var firstch; + var origword = w; + + if (w.length < 3) + return w; + + var re; + var re2; + var re3; + var re4; + + firstch = w.substr(0,1); + if (firstch == "y") + w = firstch.toUpperCase() + w.substr(1); + + // Step 1a + re = /^(.+?)(ss|i)es$/; + re2 = /^(.+?)([^s])s$/; + + if (re.test(w)) + w = w.replace(re,"$1$2"); + else if (re2.test(w)) + w = w.replace(re2,"$1$2"); + + // Step 1b + re = /^(.+?)eed$/; + re2 = /^(.+?)(ed|ing)$/; + if (re.test(w)) { + var fp = re.exec(w); + re = new RegExp(mgr0); + if (re.test(fp[1])) { + re = /.$/; + w = w.replace(re,""); + } + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1]; + re2 = new RegExp(s_v); + if (re2.test(stem)) { + w = stem; + re2 = /(at|bl|iz)$/; + re3 = new RegExp("([^aeiouylsz])\\1$"); + re4 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re2.test(w)) + w = w + "e"; + else if (re3.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + else if (re4.test(w)) + w = w + "e"; + } + } + + // Step 1c + re = /^(.+?)y$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(s_v); + if (re.test(stem)) + w = stem + "i"; + } + + // Step 2 + re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step2list[suffix]; + } + + // Step 3 + re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step3list[suffix]; + } + + // Step 4 + re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; + re2 = /^(.+?)(s|t)(ion)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + if (re.test(stem)) + w = stem; + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1] + fp[2]; + re2 = new RegExp(mgr1); + if (re2.test(stem)) + w = stem; + } + + // Step 5 + re = /^(.+?)e$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + re2 = new RegExp(meq1); + re3 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) + w = stem; + } + re = /ll$/; + re2 = new RegExp(mgr1); + if (re.test(w) && re2.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + + // and turn initial Y back to y + if (firstch == "y") + w = firstch.toLowerCase() + w.substr(1); + return w; + } +} + diff --git a/sphinx/_build/html/_static/minus.png b/sphinx/_build/html/_static/minus.png new file mode 100644 index 000000000..d96755fda Binary files /dev/null and b/sphinx/_build/html/_static/minus.png differ diff --git a/sphinx/_build/html/_static/plus.png b/sphinx/_build/html/_static/plus.png new file mode 100644 index 000000000..7107cec93 Binary files /dev/null and b/sphinx/_build/html/_static/plus.png differ diff --git a/sphinx/_build/html/_static/pygments.css b/sphinx/_build/html/_static/pygments.css new file mode 100644 index 000000000..84ab3030a --- /dev/null +++ b/sphinx/_build/html/_static/pygments.css @@ -0,0 +1,75 @@ +pre { line-height: 125%; } +td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +td.linenos 
.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +.highlight .hll { background-color: #ffffcc } +.highlight { background: #f8f8f8; } +.highlight .c { color: #3D7B7B; font-style: italic } /* Comment */ +.highlight .err { border: 1px solid #FF0000 } /* Error */ +.highlight .k { color: #008000; font-weight: bold } /* Keyword */ +.highlight .o { color: #666666 } /* Operator */ +.highlight .ch { color: #3D7B7B; font-style: italic } /* Comment.Hashbang */ +.highlight .cm { color: #3D7B7B; font-style: italic } /* Comment.Multiline */ +.highlight .cp { color: #9C6500 } /* Comment.Preproc */ +.highlight .cpf { color: #3D7B7B; font-style: italic } /* Comment.PreprocFile */ +.highlight .c1 { color: #3D7B7B; font-style: italic } /* Comment.Single */ +.highlight .cs { color: #3D7B7B; font-style: italic } /* Comment.Special */ +.highlight .gd { color: #A00000 } /* Generic.Deleted */ +.highlight .ge { font-style: italic } /* Generic.Emph */ +.highlight .ges { font-weight: bold; font-style: italic } /* Generic.EmphStrong */ +.highlight .gr { color: #E40000 } /* Generic.Error */ +.highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */ +.highlight .gi { color: #008400 } /* Generic.Inserted */ +.highlight .go { color: #717171 } /* Generic.Output */ +.highlight .gp { color: #000080; font-weight: bold } /* Generic.Prompt */ +.highlight .gs { font-weight: bold } /* Generic.Strong */ +.highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ +.highlight .gt { color: #0044DD } /* Generic.Traceback */ +.highlight .kc { color: #008000; font-weight: bold } /* Keyword.Constant */ +.highlight .kd { color: #008000; font-weight: bold } /* Keyword.Declaration */ +.highlight .kn { color: #008000; font-weight: bold } /* Keyword.Namespace */ +.highlight .kp { color: #008000 } /* Keyword.Pseudo */ +.highlight .kr { color: #008000; font-weight: bold } /* Keyword.Reserved */ +.highlight .kt { color: #B00040 } /* Keyword.Type */ +.highlight .m { color: #666666 } /* Literal.Number */ +.highlight .s { color: #BA2121 } /* Literal.String */ +.highlight .na { color: #687822 } /* Name.Attribute */ +.highlight .nb { color: #008000 } /* Name.Builtin */ +.highlight .nc { color: #0000FF; font-weight: bold } /* Name.Class */ +.highlight .no { color: #880000 } /* Name.Constant */ +.highlight .nd { color: #AA22FF } /* Name.Decorator */ +.highlight .ni { color: #717171; font-weight: bold } /* Name.Entity */ +.highlight .ne { color: #CB3F38; font-weight: bold } /* Name.Exception */ +.highlight .nf { color: #0000FF } /* Name.Function */ +.highlight .nl { color: #767600 } /* Name.Label */ +.highlight .nn { color: #0000FF; font-weight: bold } /* Name.Namespace */ +.highlight .nt { color: #008000; font-weight: bold } /* Name.Tag */ +.highlight .nv { color: #19177C } /* Name.Variable */ +.highlight .ow { color: #AA22FF; font-weight: bold } /* Operator.Word */ +.highlight .w { color: #bbbbbb } /* Text.Whitespace */ +.highlight .mb { color: #666666 } /* Literal.Number.Bin */ +.highlight .mf { color: #666666 } /* Literal.Number.Float */ +.highlight .mh { color: #666666 } /* Literal.Number.Hex */ +.highlight .mi { color: #666666 } /* Literal.Number.Integer */ +.highlight .mo { color: #666666 } /* Literal.Number.Oct */ +.highlight .sa { color: #BA2121 } /* Literal.String.Affix */ +.highlight .sb { color: #BA2121 } /* Literal.String.Backtick */ +.highlight .sc { 
color: #BA2121 } /* Literal.String.Char */ +.highlight .dl { color: #BA2121 } /* Literal.String.Delimiter */ +.highlight .sd { color: #BA2121; font-style: italic } /* Literal.String.Doc */ +.highlight .s2 { color: #BA2121 } /* Literal.String.Double */ +.highlight .se { color: #AA5D1F; font-weight: bold } /* Literal.String.Escape */ +.highlight .sh { color: #BA2121 } /* Literal.String.Heredoc */ +.highlight .si { color: #A45A77; font-weight: bold } /* Literal.String.Interpol */ +.highlight .sx { color: #008000 } /* Literal.String.Other */ +.highlight .sr { color: #A45A77 } /* Literal.String.Regex */ +.highlight .s1 { color: #BA2121 } /* Literal.String.Single */ +.highlight .ss { color: #19177C } /* Literal.String.Symbol */ +.highlight .bp { color: #008000 } /* Name.Builtin.Pseudo */ +.highlight .fm { color: #0000FF } /* Name.Function.Magic */ +.highlight .vc { color: #19177C } /* Name.Variable.Class */ +.highlight .vg { color: #19177C } /* Name.Variable.Global */ +.highlight .vi { color: #19177C } /* Name.Variable.Instance */ +.highlight .vm { color: #19177C } /* Name.Variable.Magic */ +.highlight .il { color: #666666 } /* Literal.Number.Integer.Long */ \ No newline at end of file diff --git a/sphinx/_build/html/_static/searchtools.js b/sphinx/_build/html/_static/searchtools.js new file mode 100644 index 000000000..7918c3fab --- /dev/null +++ b/sphinx/_build/html/_static/searchtools.js @@ -0,0 +1,574 @@ +/* + * searchtools.js + * ~~~~~~~~~~~~~~~~ + * + * Sphinx JavaScript utilities for the full-text search. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ +"use strict"; + +/** + * Simple result scoring code. + */ +if (typeof Scorer === "undefined") { + var Scorer = { + // Implement the following function to further tweak the score for each result + // The function takes a result array [docname, title, anchor, descr, score, filename] + // and returns the new score. + /* + score: result => { + const [docname, title, anchor, descr, score, filename] = result + return score + }, + */ + + // query matches the full name of an object + objNameMatch: 11, + // or matches in the last dotted part of the object name + objPartialMatch: 6, + // Additive scores depending on the priority of the object + objPrio: { + 0: 15, // used to be importantResults + 1: 5, // used to be objectResults + 2: -5, // used to be unimportantResults + }, + // Used when the priority is not in the mapping. 
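  // A re-ranking sketch (editorial, not part of the shipped file; the
  // "PAMI." check is purely illustrative): defining Scorer.score overrides
  // the score computed from the weights above, because query() later applies
  // `if (Scorer.score) results.forEach((item) => (item[4] = Scorer.score(item)))`
  // to each [docname, title, anchor, descr, score, filename] tuple, e.g.:
  //
  //   score: (result) => {
  //     const [docname, title, anchor, descr, score, filename] = result;
  //     return title.startsWith("PAMI.") ? score + Scorer.title : score;
  //   },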
+ objPrioDefault: 0, + + // query found in title + title: 15, + partialTitle: 7, + // query found in terms + term: 5, + partialTerm: 2, + }; +} + +const _removeChildren = (element) => { + while (element && element.lastChild) element.removeChild(element.lastChild); +}; + +/** + * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions#escaping + */ +const _escapeRegExp = (string) => + string.replace(/[.*+\-?^${}()|[\]\\]/g, "\\$&"); // $& means the whole matched string + +const _displayItem = (item, searchTerms, highlightTerms) => { + const docBuilder = DOCUMENTATION_OPTIONS.BUILDER; + const docFileSuffix = DOCUMENTATION_OPTIONS.FILE_SUFFIX; + const docLinkSuffix = DOCUMENTATION_OPTIONS.LINK_SUFFIX; + const showSearchSummary = DOCUMENTATION_OPTIONS.SHOW_SEARCH_SUMMARY; + const contentRoot = document.documentElement.dataset.content_root; + + const [docName, title, anchor, descr, score, _filename] = item; + + let listItem = document.createElement("li"); + let requestUrl; + let linkUrl; + if (docBuilder === "dirhtml") { + // dirhtml builder + let dirname = docName + "/"; + if (dirname.match(/\/index\/$/)) + dirname = dirname.substring(0, dirname.length - 6); + else if (dirname === "index/") dirname = ""; + requestUrl = contentRoot + dirname; + linkUrl = requestUrl; + } else { + // normal html builders + requestUrl = contentRoot + docName + docFileSuffix; + linkUrl = docName + docLinkSuffix; + } + let linkEl = listItem.appendChild(document.createElement("a")); + linkEl.href = linkUrl + anchor; + linkEl.dataset.score = score; + linkEl.innerHTML = title; + if (descr) { + listItem.appendChild(document.createElement("span")).innerHTML = + " (" + descr + ")"; + // highlight search terms in the description + if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js + highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted")); + } + else if (showSearchSummary) + fetch(requestUrl) + .then((responseData) => responseData.text()) + .then((data) => { + if (data) + listItem.appendChild( + Search.makeSearchSummary(data, searchTerms) + ); + // highlight search terms in the summary + if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js + highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted")); + }); + Search.output.appendChild(listItem); +}; +const _finishSearch = (resultCount) => { + Search.stopPulse(); + Search.title.innerText = _("Search Results"); + if (!resultCount) + Search.status.innerText = Documentation.gettext( + "Your search did not match any documents. Please make sure that all words are spelled correctly and that you've selected enough categories." + ); + else + Search.status.innerText = _( + `Search finished, found ${resultCount} page(s) matching the search query.` + ); +}; +const _displayNextItem = ( + results, + resultCount, + searchTerms, + highlightTerms, +) => { + // results left, load the summary and display it + // this is intended to be dynamic (don't sub resultsCount) + if (results.length) { + _displayItem(results.pop(), searchTerms, highlightTerms); + setTimeout( + () => _displayNextItem(results, resultCount, searchTerms, highlightTerms), + 5 + ); + } + // search finished, update title and status message + else _finishSearch(resultCount); +}; + +/** + * Default splitQuery function. Can be overridden in ``sphinx.search`` with a + * custom function per language. 
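 * A usage sketch with illustrative terms: under the default splitter below,
 * splitQuery("fuzzy-periodic FPGrowth") yields ["fuzzy", "periodic", "FPGrowth"],
 * because "-" and the space are neither letters, numbers, underscores, nor emoji.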
+ * + * The regular expression works by splitting the string on consecutive characters + * that are not Unicode letters, numbers, underscores, or emoji characters. + * This is the same as ``\W+`` in Python, preserving the surrogate pair area. + */ +if (typeof splitQuery === "undefined") { + var splitQuery = (query) => query + .split(/[^\p{Letter}\p{Number}_\p{Emoji_Presentation}]+/gu) + .filter(term => term) // remove remaining empty strings +} + +/** + * Search Module + */ +const Search = { + _index: null, + _queued_query: null, + _pulse_status: -1, + + htmlToText: (htmlString) => { + const htmlElement = new DOMParser().parseFromString(htmlString, 'text/html'); + htmlElement.querySelectorAll(".headerlink").forEach((el) => { el.remove() }); + const docContent = htmlElement.querySelector('[role="main"]'); + if (docContent !== undefined) return docContent.textContent; + console.warn( + "Content block not found. Sphinx search tries to obtain it via '[role=main]'. Could you check your theme or template." + ); + return ""; + }, + + init: () => { + const query = new URLSearchParams(window.location.search).get("q"); + document + .querySelectorAll('input[name="q"]') + .forEach((el) => (el.value = query)); + if (query) Search.performSearch(query); + }, + + loadIndex: (url) => + (document.body.appendChild(document.createElement("script")).src = url), + + setIndex: (index) => { + Search._index = index; + if (Search._queued_query !== null) { + const query = Search._queued_query; + Search._queued_query = null; + Search.query(query); + } + }, + + hasIndex: () => Search._index !== null, + + deferQuery: (query) => (Search._queued_query = query), + + stopPulse: () => (Search._pulse_status = -1), + + startPulse: () => { + if (Search._pulse_status >= 0) return; + + const pulse = () => { + Search._pulse_status = (Search._pulse_status + 1) % 4; + Search.dots.innerText = ".".repeat(Search._pulse_status); + if (Search._pulse_status >= 0) window.setTimeout(pulse, 500); + }; + pulse(); + }, + + /** + * perform a search for something (or wait until index is loaded) + */ + performSearch: (query) => { + // create the required interface elements + const searchText = document.createElement("h2"); + searchText.textContent = _("Searching"); + const searchSummary = document.createElement("p"); + searchSummary.classList.add("search-summary"); + searchSummary.innerText = ""; + const searchList = document.createElement("ul"); + searchList.classList.add("search"); + + const out = document.getElementById("search-results"); + Search.title = out.appendChild(searchText); + Search.dots = Search.title.appendChild(document.createElement("span")); + Search.status = out.appendChild(searchSummary); + Search.output = out.appendChild(searchList); + + const searchProgress = document.getElementById("search-progress"); + // Some themes don't use the search progress node + if (searchProgress) { + searchProgress.innerText = _("Preparing search..."); + } + Search.startPulse(); + + // index already loaded, the browser was quick! 
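    // (Sketch of the load path as wired above, assuming Sphinx's usual
    // searchindex.js as the index file: loadIndex() appends a <script> tag
    // for it, the loaded script calls Search.setIndex(), and setIndex()
    // replays any query parked in _queued_query by deferQuery().)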
+ if (Search.hasIndex()) Search.query(query); + else Search.deferQuery(query); + }, + + /** + * execute search (requires search index to be loaded) + */ + query: (query) => { + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const titles = Search._index.titles; + const allTitles = Search._index.alltitles; + const indexEntries = Search._index.indexentries; + + // stem the search terms and add them to the correct list + const stemmer = new Stemmer(); + const searchTerms = new Set(); + const excludedTerms = new Set(); + const highlightTerms = new Set(); + const objectTerms = new Set(splitQuery(query.toLowerCase().trim())); + splitQuery(query.trim()).forEach((queryTerm) => { + const queryTermLower = queryTerm.toLowerCase(); + + // maybe skip this "word" + // stopwords array is from language_data.js + if ( + stopwords.indexOf(queryTermLower) !== -1 || + queryTerm.match(/^\d+$/) + ) + return; + + // stem the word + let word = stemmer.stemWord(queryTermLower); + // select the correct list + if (word[0] === "-") excludedTerms.add(word.substr(1)); + else { + searchTerms.add(word); + highlightTerms.add(queryTermLower); + } + }); + + if (SPHINX_HIGHLIGHT_ENABLED) { // set in sphinx_highlight.js + localStorage.setItem("sphinx_highlight_terms", [...highlightTerms].join(" ")) + } + + // console.debug("SEARCH: searching for:"); + // console.info("required: ", [...searchTerms]); + // console.info("excluded: ", [...excludedTerms]); + + // array of [docname, title, anchor, descr, score, filename] + let results = []; + _removeChildren(document.getElementById("search-progress")); + + const queryLower = query.toLowerCase(); + for (const [title, foundTitles] of Object.entries(allTitles)) { + if (title.toLowerCase().includes(queryLower) && (queryLower.length >= title.length/2)) { + for (const [file, id] of foundTitles) { + let score = Math.round(100 * queryLower.length / title.length) + results.push([ + docNames[file], + titles[file] !== title ? `${titles[file]} > ${title}` : title, + id !== null ? "#" + id : "", + null, + score, + filenames[file], + ]); + } + } + } + + // search for explicit entries in index directives + for (const [entry, foundEntries] of Object.entries(indexEntries)) { + if (entry.includes(queryLower) && (queryLower.length >= entry.length/2)) { + for (const [file, id] of foundEntries) { + let score = Math.round(100 * queryLower.length / entry.length) + results.push([ + docNames[file], + titles[file], + id ? "#" + id : "", + null, + score, + filenames[file], + ]); + } + } + } + + // lookup as object + objectTerms.forEach((term) => + results.push(...Search.performObjectSearch(term, objectTerms)) + ); + + // lookup as search terms in fulltext + results.push(...Search.performTermsSearch(searchTerms, excludedTerms)); + + // let the scorer override scores with a custom scoring function + if (Scorer.score) results.forEach((item) => (item[4] = Scorer.score(item))); + + // now sort the results by score (in opposite order of appearance, since the + // display function below uses pop() to retrieve items) and then + // alphabetically + results.sort((a, b) => { + const leftScore = a[4]; + const rightScore = b[4]; + if (leftScore === rightScore) { + // same score: sort alphabetically + const leftTitle = a[1].toLowerCase(); + const rightTitle = b[1].toLowerCase(); + if (leftTitle === rightTitle) return 0; + return leftTitle > rightTitle ? -1 : 1; // inverted is intentional + } + return leftScore > rightScore ? 
1 : -1; + }); + + // remove duplicate search results + // note the reversing of results, so that in the case of duplicates, the highest-scoring entry is kept + let seen = new Set(); + results = results.reverse().reduce((acc, result) => { + let resultStr = result.slice(0, 4).concat([result[5]]).map(v => String(v)).join(','); + if (!seen.has(resultStr)) { + acc.push(result); + seen.add(resultStr); + } + return acc; + }, []); + + results = results.reverse(); + + // for debugging + //Search.lastresults = results.slice(); // a copy + // console.info("search results:", Search.lastresults); + + // print the results + _displayNextItem(results, results.length, searchTerms, highlightTerms); + }, + + /** + * search for object names + */ + performObjectSearch: (object, objectTerms) => { + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const objects = Search._index.objects; + const objNames = Search._index.objnames; + const titles = Search._index.titles; + + const results = []; + + const objectSearchCallback = (prefix, match) => { + const name = match[4] + const fullname = (prefix ? prefix + "." : "") + name; + const fullnameLower = fullname.toLowerCase(); + if (fullnameLower.indexOf(object) < 0) return; + + let score = 0; + const parts = fullnameLower.split("."); + + // check for different match types: exact matches of full name or + // "last name" (i.e. last dotted part) + if (fullnameLower === object || parts.slice(-1)[0] === object) + score += Scorer.objNameMatch; + else if (parts.slice(-1)[0].indexOf(object) > -1) + score += Scorer.objPartialMatch; // matches in last name + + const objName = objNames[match[1]][2]; + const title = titles[match[0]]; + + // If more than one term searched for, we require other words to be + // found in the name/title/description + const otherTerms = new Set(objectTerms); + otherTerms.delete(object); + if (otherTerms.size > 0) { + const haystack = `${prefix} ${name} ${objName} ${title}`.toLowerCase(); + if ( + [...otherTerms].some((otherTerm) => haystack.indexOf(otherTerm) < 0) + ) + return; + } + + let anchor = match[3]; + if (anchor === "") anchor = fullname; + else if (anchor === "-") anchor = objNames[match[1]][1] + "-" + fullname; + + const descr = objName + _(", in ") + title; + + // add custom score for some objects according to scorer + if (Scorer.objPrio.hasOwnProperty(match[2])) + score += Scorer.objPrio[match[2]]; + else score += Scorer.objPrioDefault; + + results.push([ + docNames[match[0]], + fullname, + "#" + anchor, + descr, + score, + filenames[match[0]], + ]); + }; + Object.keys(objects).forEach((prefix) => + objects[prefix].forEach((array) => + objectSearchCallback(prefix, array) + ) + ); + return results; + }, + + /** + * search for full-text terms in the index + */ + performTermsSearch: (searchTerms, excludedTerms) => { + // prepare search + const terms = Search._index.terms; + const titleTerms = Search._index.titleterms; + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const titles = Search._index.titles; + + const scoreMap = new Map(); + const fileMap = new Map(); + + // perform the search on the required terms + searchTerms.forEach((word) => { + const files = []; + const arr = [ + { files: terms[word], score: Scorer.term }, + { files: titleTerms[word], score: Scorer.title }, + ]; + // add support for partial matches + if (word.length > 2) { + const escapedWord = _escapeRegExp(word); + Object.keys(terms).forEach((term) => { + if (term.match(escapedWord) && 
!terms[word]) + arr.push({ files: terms[term], score: Scorer.partialTerm }); + }); + Object.keys(titleTerms).forEach((term) => { + if (term.match(escapedWord) && !titleTerms[word]) + arr.push({ files: titleTerms[word], score: Scorer.partialTitle }); + }); + } + + // no match but word was a required one + if (arr.every((record) => record.files === undefined)) return; + + // found search word in contents + arr.forEach((record) => { + if (record.files === undefined) return; + + let recordFiles = record.files; + if (recordFiles.length === undefined) recordFiles = [recordFiles]; + files.push(...recordFiles); + + // set score for the word in each file + recordFiles.forEach((file) => { + if (!scoreMap.has(file)) scoreMap.set(file, {}); + scoreMap.get(file)[word] = record.score; + }); + }); + + // create the mapping + files.forEach((file) => { + if (fileMap.has(file) && fileMap.get(file).indexOf(word) === -1) + fileMap.get(file).push(word); + else fileMap.set(file, [word]); + }); + }); + + // now check if the files don't contain excluded terms + const results = []; + for (const [file, wordList] of fileMap) { + // check if all requirements are matched + + // as search terms with length < 3 are discarded + const filteredTermCount = [...searchTerms].filter( + (term) => term.length > 2 + ).length; + if ( + wordList.length !== searchTerms.size && + wordList.length !== filteredTermCount + ) + continue; + + // ensure that none of the excluded terms is in the search result + if ( + [...excludedTerms].some( + (term) => + terms[term] === file || + titleTerms[term] === file || + (terms[term] || []).includes(file) || + (titleTerms[term] || []).includes(file) + ) + ) + break; + + // select one (max) score for the file. + const score = Math.max(...wordList.map((w) => scoreMap.get(file)[w])); + // add result to the result list + results.push([ + docNames[file], + titles[file], + "", + null, + score, + filenames[file], + ]); + } + return results; + }, + + /** + * helper function to return a node containing the + * search summary for a given text. keywords is a list + * of stemmed words. + */ + makeSearchSummary: (htmlText, keywords) => { + const text = Search.htmlToText(htmlText); + if (text === "") return null; + + const textLower = text.toLowerCase(); + const actualStartPosition = [...keywords] + .map((k) => textLower.indexOf(k.toLowerCase())) + .filter((i) => i > -1) + .slice(-1)[0]; + const startWithContext = Math.max(actualStartPosition - 120, 0); + + const top = startWithContext === 0 ? "" : "..."; + const tail = startWithContext + 240 < text.length ? "..." : ""; + + let summary = document.createElement("p"); + summary.classList.add("context"); + summary.textContent = top + text.substr(startWithContext, 240).trim() + tail; + + return summary; + }, +}; + +_ready(Search.init); diff --git a/sphinx/_build/html/_static/sphinx_highlight.js b/sphinx/_build/html/_static/sphinx_highlight.js new file mode 100644 index 000000000..8a96c69a1 --- /dev/null +++ b/sphinx/_build/html/_static/sphinx_highlight.js @@ -0,0 +1,154 @@ +/* Highlighting utilities for Sphinx HTML documentation. */ +"use strict"; + +const SPHINX_HIGHLIGHT_ENABLED = true + +/** + * highlight a given string on a node by wrapping it in + * span elements with the given class name. 
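 *
 * A worked sketch on an invented text node: with the term already lowercased
 * (as highlightSearchWords() does), _highlight(node, [], "fpgrowth", "highlighted")
 * on the text "FPGrowth and MaxFPGrowth" wraps the first match in
 * <span class="highlighted">, then recurses on the remaining fragment so the
 * occurrence inside "MaxFPGrowth" is wrapped as well.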
+ */ +const _highlight = (node, addItems, text, className) => { + if (node.nodeType === Node.TEXT_NODE) { + const val = node.nodeValue; + const parent = node.parentNode; + const pos = val.toLowerCase().indexOf(text); + if ( + pos >= 0 && + !parent.classList.contains(className) && + !parent.classList.contains("nohighlight") + ) { + let span; + + const closestNode = parent.closest("body, svg, foreignObject"); + const isInSVG = closestNode && closestNode.matches("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.classList.add(className); + } + + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + const rest = document.createTextNode(val.substr(pos + text.length)); + parent.insertBefore( + span, + parent.insertBefore( + rest, + node.nextSibling + ) + ); + node.nodeValue = val.substr(0, pos); + /* There may be more occurrences of search term in this node. So call this + * function recursively on the remaining fragment. + */ + _highlight(rest, addItems, text, className); + + if (isInSVG) { + const rect = document.createElementNS( + "http://www.w3.org/2000/svg", + "rect" + ); + const bbox = parent.getBBox(); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute("class", className); + addItems.push({ parent: parent, target: rect }); + } + } + } else if (node.matches && !node.matches("button, select, textarea")) { + node.childNodes.forEach((el) => _highlight(el, addItems, text, className)); + } +}; +const _highlightText = (thisNode, text, className) => { + let addItems = []; + _highlight(thisNode, addItems, text, className); + addItems.forEach((obj) => + obj.parent.insertAdjacentElement("beforebegin", obj.target) + ); +}; + +/** + * Small JavaScript module for the documentation. + */ +const SphinxHighlight = { + + /** + * highlight the search words provided in localstorage in the text + */ + highlightSearchWords: () => { + if (!SPHINX_HIGHLIGHT_ENABLED) return; // bail if no highlight + + // get and clear terms from localstorage + const url = new URL(window.location); + const highlight = + localStorage.getItem("sphinx_highlight_terms") + || url.searchParams.get("highlight") + || ""; + localStorage.removeItem("sphinx_highlight_terms") + url.searchParams.delete("highlight"); + window.history.replaceState({}, "", url); + + // get individual terms from highlight string + const terms = highlight.toLowerCase().split(/\s+/).filter(x => x); + if (terms.length === 0) return; // nothing to do + + // There should never be more than one element matching "div.body" + const divBody = document.querySelectorAll("div.body"); + const body = divBody.length ? 
divBody[0] : document.querySelector("body"); + window.setTimeout(() => { + terms.forEach((term) => _highlightText(body, term, "highlighted")); + }, 10); + + const searchBox = document.getElementById("searchbox"); + if (searchBox === null) return; + searchBox.appendChild( + document + .createRange() + .createContextualFragment( + '<p class="highlight-link">' + + '<a href="javascript:SphinxHighlight.hideSearchWords()">' + + _("Hide Search Matches") + + "</a></p>" + ) + ); + }, + + /** + * helper function to hide the search marks again + */ + hideSearchWords: () => { + document + .querySelectorAll("#searchbox .highlight-link") + .forEach((el) => el.remove()); + document + .querySelectorAll("span.highlighted") + .forEach((el) => el.classList.remove("highlighted")); + localStorage.removeItem("sphinx_highlight_terms") + }, + + initEscapeListener: () => { + // only install a listener if it is really needed + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) return; + + document.addEventListener("keydown", (event) => { + // bail for input elements + if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; + // bail with special keys + if (event.shiftKey || event.altKey || event.ctrlKey || event.metaKey) return; + if (DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS && (event.key === "Escape")) { + SphinxHighlight.hideSearchWords(); + event.preventDefault(); + } + }); + }, +}; + +_ready(() => { + /* Do not call highlightSearchWords() when we are on the search page. + * It will highlight words from the *previous* search query. + */ + if (typeof Search === "undefined") SphinxHighlight.highlightSearchWords(); + SphinxHighlight.initEscapeListener(); +}); diff --git a/sphinx/_build/html/genindex.html b/sphinx/_build/html/genindex.html new file mode 100644 index 000000000..2b1adec04 --- /dev/null +++ b/sphinx/_build/html/genindex.html @@ -0,0 +1,6735 @@ +Index — PAMI 2024.04.23 documentation

Index

+ +
+ _ + | A + | B + | C + | D + | E + | F + | G + | H + | I + | K + | L + | M + | N + | O + | P + | R + | S + | T + | U + | V + | W + +
+

P

+ + + +
    +
  • pairSmallerThan() (PAMI.subgraphMining.basic.extendedEdge.ExtendedEdge method) + +
  • +
  • + PAMI + +
  • +
  • + PAMI.AssociationRules + +
  • +
  • + PAMI.AssociationRules.basic + +
  • +
  • + PAMI.AssociationRules.basic.abstract + +
  • +
  • + PAMI.AssociationRules.basic.ARWithConfidence + +
  • +
  • + PAMI.AssociationRules.basic.ARWithLeverage + +
  • +
  • + PAMI.AssociationRules.basic.ARWithLift + +
  • +
  • + PAMI.AssociationRules.basic.RuleMiner + +
  • +
  • + PAMI.correlatedPattern + +
  • +
  • + PAMI.correlatedPattern.basic + +
  • +
  • + PAMI.correlatedPattern.basic.abstract + +
  • +
  • + PAMI.correlatedPattern.basic.CoMine + +
  • +
  • + PAMI.correlatedPattern.basic.CoMinePlus + +
  • +
  • + PAMI.coveragePattern + +
  • +
  • + PAMI.coveragePattern.basic + +
  • +
  • + PAMI.coveragePattern.basic.abstract + +
  • +
  • + PAMI.coveragePattern.basic.CMine + +
  • +
  • + PAMI.coveragePattern.basic.CPPG + +
  • +
  • + PAMI.extras + +
  • +
  • + PAMI.extras.calculateMISValues + +
  • +
  • + PAMI.extras.calculateMISValues.usingBeta + +
  • +
  • + PAMI.extras.calculateMISValues.usingSD + +
  • +
  • + PAMI.extras.dbStats + +
  • +
  • + PAMI.extras.dbStats.FuzzyDatabase + +
  • +
  • + PAMI.extras.dbStats.MultipleTimeSeriesFuzzyDatabaseStats + +
  • +
  • + PAMI.extras.dbStats.SequentialDatabase + +
  • +
  • + PAMI.extras.dbStats.TemporalDatabase + +
  • +
  • + PAMI.extras.dbStats.TransactionalDatabase + +
  • +
  • + PAMI.extras.dbStats.UncertainTemporalDatabase + +
  • +
  • + PAMI.extras.dbStats.UncertainTransactionalDatabase + +
  • +
  • + PAMI.extras.dbStats.UtilityDatabase + +
  • +
  • + PAMI.extras.DF2DB + +
  • +
  • + PAMI.extras.DF2DB.createTDB + +
  • +
  • + PAMI.extras.DF2DB.denseDF2DB_dump + +
  • +
  • + PAMI.extras.DF2DB.denseDF2DBPlus + +
  • +
  • + PAMI.extras.DF2DB.DenseFormatDF + +
  • +
  • + PAMI.extras.DF2DB.DF2DB + +
  • +
  • + PAMI.extras.DF2DB.sparseDF2DBPlus + +
  • +
  • + PAMI.extras.DF2DB.SparseFormatDF + +
  • +
  • + PAMI.extras.fuzzyTransformation + +
  • +
  • + PAMI.extras.fuzzyTransformation.abstract + +
  • +
  • + PAMI.extras.fuzzyTransformation.temporalToFuzzy + +
  • +
  • + PAMI.extras.fuzzyTransformation.transactionalToFuzzy + +
  • +
  • + PAMI.extras.generateDatabase + +
  • +
  • + PAMI.extras.generateDatabase.generateSpatioTemporalDatabase + +
  • +
  • + PAMI.extras.generateDatabase.generateTemporalDatabase + +
  • +
  • + PAMI.extras.generateDatabase.generateTransactionalDatabase + +
  • +
  • + PAMI.extras.generateLatexGraphFile + +
  • +
  • + PAMI.extras.graph + +
  • +
  • + PAMI.extras.graph.DF2Fig + +
  • +
  • + PAMI.extras.graph.plotLineGraphFromDictionary + +
  • +
  • + PAMI.extras.graph.plotLineGraphsFromDataFrame + +
  • +
  • + PAMI.extras.graph.visualizeFuzzyPatterns + +
  • +
  • + PAMI.extras.graph.visualizePatterns + +
  • +
  • + PAMI.extras.image2Database + +
  • +
  • + PAMI.extras.imageProcessing + +
  • +
  • + PAMI.extras.imageProcessing.imagery2Databases + +
  • +
  • + PAMI.extras.messaging + +
  • +
  • + PAMI.extras.messaging.discord + +
  • +
  • + PAMI.extras.messaging.gmail + +
  • +
  • + PAMI.extras.neighbours + +
  • +
  • + PAMI.extras.neighbours.findNeighborsUsingEuclideanDistanceforPointInfo + +
  • +
  • + PAMI.extras.neighbours.findNeighboursUsingEuclidean + +
  • +
  • + PAMI.extras.neighbours.findNeighboursUsingGeodesic + +
  • +
  • + PAMI.extras.plotPointOnMap + +
  • +
  • + PAMI.extras.plotPointOnMap_dump + +
  • +
  • + PAMI.extras.sampleDatasets + +
  • +
  • + PAMI.extras.scatterPlotSpatialPoints + +
  • +
  • + PAMI.extras.stats + +
  • +
  • + PAMI.extras.stats.graphDatabase + +
  • +
  • + PAMI.extras.stats.sequentialDatabase + +
  • +
  • + PAMI.extras.stats.temporalDatabase + +
  • +
  • + PAMI.extras.stats.TransactionalDatabase + +
  • +
  • + PAMI.extras.stats.utilityDatabase + +
  • +
  • + PAMI.extras.syntheticDataGenerator + +
  • +
  • + PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialTemporal + +
  • +
  • + PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialTransactions + +
  • +
  • + PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialUncertainTransaction + +
  • +
  • + PAMI.extras.syntheticDataGenerator.createSyntheticTemporal + +
  • +
  • + PAMI.extras.syntheticDataGenerator.createSyntheticTransactions + +
  • +
  • + PAMI.extras.syntheticDataGenerator.createSyntheticUncertainTemporal + +
  • +
  • + PAMI.extras.syntheticDataGenerator.createSyntheticUncertainTransactions + +
  • +
  • + PAMI.extras.syntheticDataGenerator.createSyntheticUtility + +
  • +
  • + PAMI.extras.syntheticDataGenerator.fuzzyDatabase + +
  • +
  • + PAMI.extras.syntheticDataGenerator.generateTemporal + +
  • +
  • + PAMI.extras.syntheticDataGenerator.generateTransactional + +
  • +
  • + PAMI.extras.syntheticDataGenerator.generateUncertainTemporal + +
  • +
  • + PAMI.extras.syntheticDataGenerator.generateUncertainTransactional + +
  • +
  • + PAMI.extras.syntheticDataGenerator.generateUtilityTemporal + +
  • +
  • + PAMI.extras.syntheticDataGenerator.generateUtilityTransactional + +
  • +
  • + PAMI.extras.syntheticDataGenerator.georeferencedTemporalDatabase + +
  • +
  • + PAMI.extras.syntheticDataGenerator.georeferencedTransactionalDatabase + +
  • +
  • + PAMI.extras.syntheticDataGenerator.syntheticUtilityDatabase + +
  • +
  • + PAMI.extras.syntheticDataGenerator.TemporalDatabase + +
  • +
  • + PAMI.extras.syntheticDataGenerator.temporalDatabaseGen + +
  • +
  • + PAMI.extras.syntheticDataGenerator.TransactionalDatabase + +
  • +
  • + PAMI.extras.syntheticDataGenerator.utilityDatabase + +
  • +
  • + PAMI.extras.topKPatterns + +
  • +
  • + PAMI.extras.uncertaindb_convert + +
  • +
  • + PAMI.extras.visualize + +
  • +
  • + PAMI.extras.visualize.graphs + +
  • +
  • + PAMI.faultTolerantFrequentPattern + +
  • +
  • + PAMI.faultTolerantFrequentPattern.basic + +
  • +
  • + PAMI.faultTolerantFrequentPattern.basic.abstract + +
  • +
  • + PAMI.faultTolerantFrequentPattern.basic.FTApriori + +
  • +
  • + PAMI.faultTolerantFrequentPattern.basic.FTFPGrowth + +
  • +
  • + PAMI.frequentPattern + +
  • +
  • + PAMI.frequentPattern.basic + +
  • +
  • + PAMI.frequentPattern.basic.abstract + +
  • +
  • + PAMI.frequentPattern.basic.Apriori + +
  • +
  • + PAMI.frequentPattern.basic.ECLAT + +
  • +
  • + PAMI.frequentPattern.basic.ECLATbitset + +
  • +
  • + PAMI.frequentPattern.basic.ECLATDiffset + +
  • +
  • + PAMI.frequentPattern.basic.FPGrowth + +
  • +
  • + PAMI.frequentPattern.closed + +
  • +
  • + PAMI.frequentPattern.closed.abstract + +
  • +
  • + PAMI.frequentPattern.closed.CHARM + +
  • +
  • + PAMI.frequentPattern.cuda + +
  • +
  • + PAMI.frequentPattern.maximal + +
  • +
  • + PAMI.frequentPattern.maximal.abstract + +
  • +
  • + PAMI.frequentPattern.maximal.MaxFPGrowth + +
  • +
  • + PAMI.frequentPattern.pyspark + +
  • +
  • + PAMI.frequentPattern.topk + +
  • +
  • + PAMI.frequentPattern.topk.abstract + +
  • +
  • + PAMI.frequentPattern.topk.FAE + +
  • +
  • + PAMI.fuzzyCorrelatedPattern + +
  • +
  • + PAMI.fuzzyCorrelatedPattern.basic + +
  • +
  • + PAMI.fuzzyCorrelatedPattern.basic.abstract + +
  • +
  • + PAMI.fuzzyCorrelatedPattern.basic.FCPGrowth + +
  • +
  • + PAMI.fuzzyFrequentPattern + +
  • +
  • + PAMI.fuzzyFrequentPattern.basic + +
  • +
  • + PAMI.fuzzyFrequentPattern.basic.abstract + +
  • +
  • + PAMI.fuzzyFrequentPattern.basic.FFIMiner + +
  • +
  • + PAMI.fuzzyFrequentPattern.basic.FFIMiner_old + +
  • +
  • + PAMI.fuzzyGeoreferencedFrequentPattern + +
  • +
  • + PAMI.fuzzyGeoreferencedFrequentPattern.basic + +
  • +
  • + PAMI.fuzzyGeoreferencedFrequentPattern.basic.abstract + +
  • +
  • + PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner + +
  • +
  • + PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner_old + +
  • +
  • + PAMI.fuzzyGeoreferencedPeriodicFrequentPattern + +
  • +
  • + PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic + +
  • +
  • + PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.abstract + +
  • +
  • + PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner + +
  • +
  • + PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner_old + +
  • +
  • + PAMI.fuzzyPartialPeriodicPatterns + +
  • +
  • + PAMI.fuzzyPartialPeriodicPatterns.basic + +
  • +
  • + PAMI.fuzzyPartialPeriodicPatterns.basic.abstract + +
  • +
  • + PAMI.fuzzyPartialPeriodicPatterns.basic.F3PMiner + +
  • +
  • + PAMI.fuzzyPeriodicFrequentPattern + +
  • +
  • + PAMI.fuzzyPeriodicFrequentPattern.basic + +
  • +
  • + PAMI.fuzzyPeriodicFrequentPattern.basic.abstract + +
  • +
  • + PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner + +
  • +
  • + PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner_old + +
  • +
  • + PAMI.georeferencedFrequentPattern + +
  • +
  • + PAMI.georeferencedFrequentPattern.basic + +
  • +
  • + PAMI.georeferencedFrequentPattern.basic.abstract + +
  • +
  • + PAMI.georeferencedFrequentPattern.basic.SpatialECLAT + +
  • +
  • + PAMI.georeferencedFrequentSequencePattern + +
  • +
  • + PAMI.georeferencedFrequentSequencePattern.abstract + +
  • +
  • + PAMI.georeferencedPartialPeriodicPattern + +
  • +
  • + PAMI.georeferencedPartialPeriodicPattern.basic + +
  • +
  • + PAMI.georeferencedPartialPeriodicPattern.basic.abstract + +
  • +
  • + PAMI.georeferencedPartialPeriodicPattern.basic.STEclat + +
  • +
  • + PAMI.geoReferencedPeriodicFrequentPattern + +
  • +
  • + PAMI.geoReferencedPeriodicFrequentPattern.basic + +
  • +
  • + PAMI.geoReferencedPeriodicFrequentPattern.basic.abstract + +
  • +
  • + PAMI.geoReferencedPeriodicFrequentPattern.basic.GPFPMiner + +
  • +
  • + PAMI.highUtilityFrequentPattern + +
  • +
  • + PAMI.highUtilityFrequentPattern.basic + +
  • +
  • + PAMI.highUtilityFrequentPattern.basic.abstract + +
  • +
  • + PAMI.highUtilityFrequentPattern.basic.HUFIM + +
  • +
  • + PAMI.highUtilityGeoreferencedFrequentPattern + +
  • +
  • + PAMI.highUtilityGeoreferencedFrequentPattern.basic + +
  • +
  • + PAMI.highUtilityGeoreferencedFrequentPattern.basic.abstract + +
  • +
  • + PAMI.highUtilityGeoreferencedFrequentPattern.basic.SHUFIM + +
  • +
  • + PAMI.highUtilityPattern + +
  • +
  • + PAMI.highUtilityPattern.basic + +
  • +
  • + PAMI.highUtilityPattern.basic.abstract + +
  • +
  • + PAMI.highUtilityPattern.basic.EFIM + +
  • +
  • + PAMI.highUtilityPattern.basic.HMiner + +
  • +
  • + PAMI.highUtilityPattern.basic.UPGrowth + +
  • +
  • + PAMI.highUtilityPattern.parallel + +
  • +
  • + PAMI.highUtilityPattern.parallel.abstract + +
  • +
  • + PAMI.highUtilityPatternsInStreams + +
  • +
  • + PAMI.highUtilityPatternsInStreams.abstract + +
  • +
  • + PAMI.highUtilitySpatialPattern + +
  • +
  • + PAMI.highUtilitySpatialPattern.abstract + +
  • +
  • + PAMI.highUtilitySpatialPattern.basic + +
  • +
  • + PAMI.highUtilitySpatialPattern.basic.abstract + +
  • +
  • + PAMI.highUtilitySpatialPattern.basic.HDSHUIM + +
  • +
  • + PAMI.highUtilitySpatialPattern.basic.SHUIM + +
  • +
  • + PAMI.highUtilitySpatialPattern.topk + +
  • +
  • + PAMI.highUtilitySpatialPattern.topk.abstract + +
  • +
  • + PAMI.highUtilitySpatialPattern.topk.TKSHUIM + +
  • +
  • + PAMI.localPeriodicPattern + +
  • +
  • + PAMI.localPeriodicPattern.basic + +
  • +
  • + PAMI.localPeriodicPattern.basic.abstract + +
  • +
  • + PAMI.localPeriodicPattern.basic.LPPGrowth + +
  • +
  • + PAMI.localPeriodicPattern.basic.LPPMBreadth + +
  • +
  • + PAMI.localPeriodicPattern.basic.LPPMDepth + +
  • +
  • + PAMI.multipleMinimumSupportBasedFrequentPattern + +
  • +
  • + PAMI.multipleMinimumSupportBasedFrequentPattern.basic + +
  • +
  • + PAMI.multipleMinimumSupportBasedFrequentPattern.basic.abstract + +
  • +
+ +

© Copyright 2024, RAGE Uday Kiran.

Built with Sphinx using a theme provided by Read the Docs.
\ No newline at end of file diff --git a/sphinx/_build/html/index.html b/sphinx/_build/html/index.html new file mode 100644 index 000000000..fd134f3a4 --- /dev/null +++ b/sphinx/_build/html/index.html @@ -0,0 +1,124 @@ +Welcome to PAMI’s documentation! — PAMI 2024.04.23 documentation

Welcome to PAMI’s documentation!


Contents:


Indices and tables

\ No newline at end of file diff --git a/sphinx/_build/html/modules.html b/sphinx/_build/html/modules.html new file mode 100644 index 000000000..2fb7b1b25 --- /dev/null +++ b/sphinx/_build/html/modules.html @@ -0,0 +1,355 @@ +PAMI — PAMI 2024.04.23 documentation

PAMI

\ No newline at end of file diff --git a/sphinx/_build/html/objects.inv b/sphinx/_build/html/objects.inv new file mode 100644 index 000000000..cef88aff1 Binary files /dev/null and b/sphinx/_build/html/objects.inv differ diff --git a/sphinx/_build/html/py-modindex.html b/sphinx/_build/html/py-modindex.html new file mode 100644 index 000000000..607d047f3 --- /dev/null +++ b/sphinx/_build/html/py-modindex.html @@ -0,0 +1,1800 @@ +Python Module Index — PAMI 2024.04.23 documentation

Python Module Index

+ +
+ p +
 
+ p
+ PAMI +
    + PAMI.AssociationRules +
    + PAMI.AssociationRules.basic +
    + PAMI.AssociationRules.basic.abstract +
    + PAMI.AssociationRules.basic.ARWithConfidence +
    + PAMI.AssociationRules.basic.ARWithLeverage +
    + PAMI.AssociationRules.basic.ARWithLift +
    + PAMI.AssociationRules.basic.RuleMiner +
    + PAMI.correlatedPattern +
    + PAMI.correlatedPattern.basic +
    + PAMI.correlatedPattern.basic.abstract +
    + PAMI.correlatedPattern.basic.CoMine +
    + PAMI.correlatedPattern.basic.CoMinePlus +
    + PAMI.coveragePattern +
    + PAMI.coveragePattern.basic +
    + PAMI.coveragePattern.basic.abstract +
    + PAMI.coveragePattern.basic.CMine +
    + PAMI.coveragePattern.basic.CPPG +
    + PAMI.extras +
    + PAMI.extras.calculateMISValues +
    + PAMI.extras.calculateMISValues.usingBeta +
    + PAMI.extras.calculateMISValues.usingSD +
    + PAMI.extras.dbStats +
    + PAMI.extras.dbStats.FuzzyDatabase +
    + PAMI.extras.dbStats.MultipleTimeSeriesFuzzyDatabaseStats +
    + PAMI.extras.dbStats.SequentialDatabase +
    + PAMI.extras.dbStats.TemporalDatabase +
    + PAMI.extras.dbStats.TransactionalDatabase +
    + PAMI.extras.dbStats.UncertainTemporalDatabase +
    + PAMI.extras.dbStats.UncertainTransactionalDatabase +
    + PAMI.extras.dbStats.UtilityDatabase +
    + PAMI.extras.DF2DB +
    + PAMI.extras.DF2DB.createTDB +
    + PAMI.extras.DF2DB.denseDF2DB_dump +
    + PAMI.extras.DF2DB.denseDF2DBPlus +
    + PAMI.extras.DF2DB.DenseFormatDF +
    + PAMI.extras.DF2DB.DF2DB +
    + PAMI.extras.DF2DB.sparseDF2DBPlus +
    + PAMI.extras.DF2DB.SparseFormatDF +
    + PAMI.extras.fuzzyTransformation +
    + PAMI.extras.fuzzyTransformation.abstract +
    + PAMI.extras.fuzzyTransformation.temporalToFuzzy +
    + PAMI.extras.fuzzyTransformation.transactionalToFuzzy +
    + PAMI.extras.generateDatabase +
    + PAMI.extras.generateDatabase.generateSpatioTemporalDatabase +
    + PAMI.extras.generateDatabase.generateTemporalDatabase +
    + PAMI.extras.generateDatabase.generateTransactionalDatabase +
    + PAMI.extras.generateLatexGraphFile +
    + PAMI.extras.graph +
    + PAMI.extras.graph.DF2Fig +
    + PAMI.extras.graph.plotLineGraphFromDictionary +
    + PAMI.extras.graph.plotLineGraphsFromDataFrame +
    + PAMI.extras.graph.visualizeFuzzyPatterns +
    + PAMI.extras.graph.visualizePatterns +
    + PAMI.extras.image2Database +
    + PAMI.extras.imageProcessing +
    + PAMI.extras.imageProcessing.imagery2Databases +
    + PAMI.extras.messaging +
    + PAMI.extras.messaging.discord +
    + PAMI.extras.messaging.gmail +
    + PAMI.extras.neighbours +
    + PAMI.extras.neighbours.findNeighborsUsingEuclideanDistanceforPointInfo +
    + PAMI.extras.neighbours.findNeighboursUsingEuclidean +
    + PAMI.extras.neighbours.findNeighboursUsingGeodesic +
    + PAMI.extras.plotPointOnMap +
    + PAMI.extras.plotPointOnMap_dump +
    + PAMI.extras.sampleDatasets +
    + PAMI.extras.scatterPlotSpatialPoints +
    + PAMI.extras.stats +
    + PAMI.extras.stats.graphDatabase +
    + PAMI.extras.stats.sequentialDatabase +
    + PAMI.extras.stats.temporalDatabase +
    + PAMI.extras.stats.TransactionalDatabase +
    + PAMI.extras.stats.utilityDatabase +
    + PAMI.extras.syntheticDataGenerator +
    + PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialTemporal +
    + PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialTransactions +
    + PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialUncertainTransaction +
    + PAMI.extras.syntheticDataGenerator.createSyntheticTemporal +
    + PAMI.extras.syntheticDataGenerator.createSyntheticTransactions +
    + PAMI.extras.syntheticDataGenerator.createSyntheticUncertainTemporal +
    + PAMI.extras.syntheticDataGenerator.createSyntheticUncertainTransactions +
    + PAMI.extras.syntheticDataGenerator.createSyntheticUtility +
    + PAMI.extras.syntheticDataGenerator.fuzzyDatabase +
    + PAMI.extras.syntheticDataGenerator.generateTemporal +
    + PAMI.extras.syntheticDataGenerator.generateTransactional +
    + PAMI.extras.syntheticDataGenerator.generateUncertainTemporal +
    + PAMI.extras.syntheticDataGenerator.generateUncertainTransactional +
    + PAMI.extras.syntheticDataGenerator.generateUtilityTemporal +
    + PAMI.extras.syntheticDataGenerator.generateUtilityTransactional +
    + PAMI.extras.syntheticDataGenerator.georeferencedTemporalDatabase +
    + PAMI.extras.syntheticDataGenerator.georeferencedTransactionalDatabase +
    + PAMI.extras.syntheticDataGenerator.syntheticUtilityDatabase +
    + PAMI.extras.syntheticDataGenerator.TemporalDatabase +
    + PAMI.extras.syntheticDataGenerator.temporalDatabaseGen +
    + PAMI.extras.syntheticDataGenerator.TransactionalDatabase +
    + PAMI.extras.syntheticDataGenerator.utilityDatabase +
    + PAMI.extras.topKPatterns +
    + PAMI.extras.uncertaindb_convert +
    + PAMI.extras.visualize +
    + PAMI.extras.visualize.graphs +
    + PAMI.faultTolerantFrequentPattern +
    + PAMI.faultTolerantFrequentPattern.basic +
    + PAMI.faultTolerantFrequentPattern.basic.abstract +
    + PAMI.faultTolerantFrequentPattern.basic.FTApriori +
    + PAMI.faultTolerantFrequentPattern.basic.FTFPGrowth +
    + PAMI.frequentPattern +
    + PAMI.frequentPattern.basic +
    + PAMI.frequentPattern.basic.abstract +
    + PAMI.frequentPattern.basic.Apriori +
    + PAMI.frequentPattern.basic.ECLAT +
    + PAMI.frequentPattern.basic.ECLATbitset +
    + PAMI.frequentPattern.basic.ECLATDiffset +
    + PAMI.frequentPattern.basic.FPGrowth +
    + PAMI.frequentPattern.closed +
    + PAMI.frequentPattern.closed.abstract +
    + PAMI.frequentPattern.closed.CHARM +
    + PAMI.frequentPattern.cuda +
    + PAMI.frequentPattern.maximal +
    + PAMI.frequentPattern.maximal.abstract +
    + PAMI.frequentPattern.maximal.MaxFPGrowth +
    + PAMI.frequentPattern.pyspark +
    + PAMI.frequentPattern.topk +
    + PAMI.frequentPattern.topk.abstract +
    + PAMI.frequentPattern.topk.FAE +
    + PAMI.fuzzyCorrelatedPattern +
    + PAMI.fuzzyCorrelatedPattern.basic +
    + PAMI.fuzzyCorrelatedPattern.basic.abstract +
    + PAMI.fuzzyCorrelatedPattern.basic.FCPGrowth +
    + PAMI.fuzzyFrequentPattern +
    + PAMI.fuzzyFrequentPattern.basic +
    + PAMI.fuzzyFrequentPattern.basic.abstract +
    + PAMI.fuzzyFrequentPattern.basic.FFIMiner +
    + PAMI.fuzzyFrequentPattern.basic.FFIMiner_old +
    + PAMI.fuzzyGeoreferencedFrequentPattern +
    + PAMI.fuzzyGeoreferencedFrequentPattern.basic +
    + PAMI.fuzzyGeoreferencedFrequentPattern.basic.abstract +
    + PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner +
    + PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner_old +
    + PAMI.fuzzyGeoreferencedPeriodicFrequentPattern +
    + PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic +
    + PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.abstract +
    + PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner +
    + PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner_old +
    + PAMI.fuzzyPartialPeriodicPatterns +
    + PAMI.fuzzyPartialPeriodicPatterns.basic +
    + PAMI.fuzzyPartialPeriodicPatterns.basic.abstract +
    + PAMI.fuzzyPartialPeriodicPatterns.basic.F3PMiner +
    + PAMI.fuzzyPeriodicFrequentPattern +
    + PAMI.fuzzyPeriodicFrequentPattern.basic +
    + PAMI.fuzzyPeriodicFrequentPattern.basic.abstract +
    + PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner +
    + PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner_old +
    + PAMI.georeferencedFrequentPattern +
    + PAMI.georeferencedFrequentPattern.basic +
    + PAMI.georeferencedFrequentPattern.basic.abstract +
    + PAMI.georeferencedFrequentPattern.basic.SpatialECLAT +
    + PAMI.georeferencedFrequentSequencePattern +
    + PAMI.georeferencedFrequentSequencePattern.abstract +
    + PAMI.georeferencedPartialPeriodicPattern +
    + PAMI.georeferencedPartialPeriodicPattern.basic +
    + PAMI.georeferencedPartialPeriodicPattern.basic.abstract +
    + PAMI.georeferencedPartialPeriodicPattern.basic.STEclat +
    + PAMI.geoReferencedPeriodicFrequentPattern +
    + PAMI.geoReferencedPeriodicFrequentPattern.basic +
    + PAMI.geoReferencedPeriodicFrequentPattern.basic.abstract +
    + PAMI.geoReferencedPeriodicFrequentPattern.basic.GPFPMiner +
    + PAMI.highUtilityFrequentPattern +
    + PAMI.highUtilityFrequentPattern.basic +
    + PAMI.highUtilityFrequentPattern.basic.abstract +
    + PAMI.highUtilityFrequentPattern.basic.HUFIM +
    + PAMI.highUtilityGeoreferencedFrequentPattern +
    + PAMI.highUtilityGeoreferencedFrequentPattern.basic +
    + PAMI.highUtilityGeoreferencedFrequentPattern.basic.abstract +
    + PAMI.highUtilityGeoreferencedFrequentPattern.basic.SHUFIM +
    + PAMI.highUtilityPattern +
    + PAMI.highUtilityPattern.basic +
    + PAMI.highUtilityPattern.basic.abstract +
    + PAMI.highUtilityPattern.basic.EFIM +
    + PAMI.highUtilityPattern.basic.HMiner +
    + PAMI.highUtilityPattern.basic.UPGrowth +
    + PAMI.highUtilityPattern.parallel +
    + PAMI.highUtilityPattern.parallel.abstract +
    + PAMI.highUtilityPatternsInStreams +
    + PAMI.highUtilityPatternsInStreams.abstract +
    + PAMI.highUtilitySpatialPattern +
    + PAMI.highUtilitySpatialPattern.abstract +
    + PAMI.highUtilitySpatialPattern.basic +
    + PAMI.highUtilitySpatialPattern.basic.abstract +
    + PAMI.highUtilitySpatialPattern.basic.HDSHUIM +
    + PAMI.highUtilitySpatialPattern.basic.SHUIM +
    + PAMI.highUtilitySpatialPattern.topk +
    + PAMI.highUtilitySpatialPattern.topk.abstract +
    + PAMI.highUtilitySpatialPattern.topk.TKSHUIM +
    + PAMI.localPeriodicPattern +
    + PAMI.localPeriodicPattern.basic +
    + PAMI.localPeriodicPattern.basic.abstract +
    + PAMI.localPeriodicPattern.basic.LPPGrowth +
    + PAMI.localPeriodicPattern.basic.LPPMBreadth +
    + PAMI.localPeriodicPattern.basic.LPPMDepth +
    + PAMI.multipleMinimumSupportBasedFrequentPattern +
    + PAMI.multipleMinimumSupportBasedFrequentPattern.basic +
    + PAMI.multipleMinimumSupportBasedFrequentPattern.basic.abstract +
    + PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowth +
    + PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowthPlus +
    + PAMI.partialPeriodicFrequentPattern +
    + PAMI.partialPeriodicFrequentPattern.basic +
    + PAMI.partialPeriodicFrequentPattern.basic.abstract +
    + PAMI.partialPeriodicPattern +
    + PAMI.partialPeriodicPattern.basic +
    + PAMI.partialPeriodicPattern.basic.abstract +
    + PAMI.partialPeriodicPattern.basic.Gabstract +
    + PAMI.partialPeriodicPattern.basic.PPP_ECLAT +
    + PAMI.partialPeriodicPattern.basic.PPPGrowth +
    + PAMI.partialPeriodicPattern.closed +
    + PAMI.partialPeriodicPattern.closed.abstract +
    + PAMI.partialPeriodicPattern.closed.PPPClose +
    + PAMI.partialPeriodicPattern.maximal +
    + PAMI.partialPeriodicPattern.maximal.abstract +
    + PAMI.partialPeriodicPattern.pyspark +
    + PAMI.partialPeriodicPattern.pyspark.abstract +
    + PAMI.partialPeriodicPattern.topk +
    + PAMI.partialPeriodicPattern.topk.abstract +
    + PAMI.partialPeriodicPattern.topk.k3PMiner +
    + PAMI.partialPeriodicPatternInMultipleTimeSeries +
    + PAMI.partialPeriodicPatternInMultipleTimeSeries.abstract +
    + PAMI.partialPeriodicPatternInMultipleTimeSeries.PPGrowth +
    + PAMI.periodicCorrelatedPattern +
    + PAMI.periodicCorrelatedPattern.basic +
    + PAMI.periodicCorrelatedPattern.basic.abstract +
    + PAMI.periodicCorrelatedPattern.basic.EPCPGrowth +
    + PAMI.periodicFrequentPattern +
    + PAMI.periodicFrequentPattern.basic +
    + PAMI.periodicFrequentPattern.basic.abstract +
    + PAMI.periodicFrequentPattern.basic.PFECLAT +
    + PAMI.periodicFrequentPattern.basic.PFPGrowth +
    + PAMI.periodicFrequentPattern.basic.PFPGrowthPlus +
    + PAMI.periodicFrequentPattern.basic.PFPMC +
    + PAMI.periodicFrequentPattern.basic.PSGrowth +
    + PAMI.periodicFrequentPattern.closed +
    + PAMI.periodicFrequentPattern.closed.abstract +
    + PAMI.periodicFrequentPattern.closed.CPFPMiner +
    + PAMI.periodicFrequentPattern.cuda +
    + PAMI.periodicFrequentPattern.maximal +
    + PAMI.periodicFrequentPattern.maximal.abstract +
    + PAMI.periodicFrequentPattern.maximal.MaxPFGrowth +
    + PAMI.periodicFrequentPattern.pyspark +
    + PAMI.periodicFrequentPattern.topk +
    + PAMI.periodicFrequentPattern.topk.kPFPMiner +
    + PAMI.periodicFrequentPattern.topk.kPFPMiner.abstract +
    + PAMI.periodicFrequentPattern.topk.kPFPMiner.kPFPMiner +
    + PAMI.periodicFrequentPattern.topk.TopkPFP +
    + PAMI.periodicFrequentPattern.topk.TopkPFP.abstract +
    + PAMI.periodicFrequentPattern.topk.TopkPFP.TopkPFP +
    + PAMI.recurringPattern +
    + PAMI.recurringPattern.basic +
    + PAMI.recurringPattern.basic.abstract +
    + PAMI.recurringPattern.basic.RPGrowth +
    + PAMI.relativeFrequentPattern +
    + PAMI.relativeFrequentPattern.basic +
    + PAMI.relativeFrequentPattern.basic.abstract +
    + PAMI.relativeFrequentPattern.basic.RSFPGrowth +
    + PAMI.relativeHighUtilityPattern +
    + PAMI.relativeHighUtilityPattern.basic +
    + PAMI.relativeHighUtilityPattern.basic.abstract +
    + PAMI.relativeHighUtilityPattern.basic.RHUIM +
    + PAMI.sequence +
    + PAMI.sequentialPatternMining +
    + PAMI.sequentialPatternMining.basic +
    + PAMI.sequentialPatternMining.basic.abstract +
    + PAMI.sequentialPatternMining.basic.prefixSpan +
    + PAMI.sequentialPatternMining.basic.SPADE +
    + PAMI.sequentialPatternMining.basic.SPAM +
    + PAMI.sequentialPatternMining.closed +
    + PAMI.sequentialPatternMining.closed.abstract +
    + PAMI.sequentialPatternMining.closed.bide +
    + PAMI.stablePeriodicFrequentPattern +
    + PAMI.stablePeriodicFrequentPattern.basic +
    + PAMI.stablePeriodicFrequentPattern.basic.abstract +
    + PAMI.stablePeriodicFrequentPattern.basic.SPPEclat +
    + PAMI.stablePeriodicFrequentPattern.topK +
    + PAMI.stablePeriodicFrequentPattern.topK.abstract +
    + PAMI.stablePeriodicFrequentPattern.topK.TSPIN +
    + PAMI.subgraphMining +
    + PAMI.subgraphMining.basic +
    + PAMI.subgraphMining.basic.abstract +
    + PAMI.subgraphMining.basic.dfsCode +
    + PAMI.subgraphMining.basic.edge +
    + PAMI.subgraphMining.basic.extendedEdge +
    + PAMI.subgraphMining.basic.frequentSubgraph +
    + PAMI.subgraphMining.basic.graph +
    + PAMI.subgraphMining.basic.gspan +
    + PAMI.subgraphMining.basic.sparseTriangularMatrix +
    + PAMI.subgraphMining.basic.vertex +
    + PAMI.subgraphMining.topK +
    + PAMI.subgraphMining.topK.abstract +
    + PAMI.subgraphMining.topK.DFSCode +
    + PAMI.subgraphMining.topK.DFSThread +
    + PAMI.subgraphMining.topK.edge +
    + PAMI.subgraphMining.topK.extendedEdge +
    + PAMI.subgraphMining.topK.frequentSubgraph +
    + PAMI.subgraphMining.topK.graph +
    + PAMI.subgraphMining.topK.sparseTriangularMatrix +
    + PAMI.subgraphMining.topK.tkg +
    + PAMI.subgraphMining.topK.vertex +
    + PAMI.uncertainFaultTolerantFrequentPattern +
    + PAMI.uncertainFaultTolerantFrequentPattern.abstract +
    + PAMI.uncertainFaultTolerantFrequentPattern.VBFTMine +
    + PAMI.uncertainFrequentPattern +
    + PAMI.uncertainFrequentPattern.basic +
    + PAMI.uncertainFrequentPattern.basic.abstract +
    + PAMI.uncertainFrequentPattern.basic.CUFPTree +
    + PAMI.uncertainGeoreferencedFrequentPattern +
    + PAMI.uncertainGeoreferencedFrequentPattern.basic +
    + PAMI.uncertainGeoreferencedFrequentPattern.basic.abstract +
    + PAMI.uncertainGeoreferencedFrequentPattern.basic.GFPGrowth +
    + PAMI.uncertainPeriodicFrequentPattern +
    + PAMI.uncertainPeriodicFrequentPattern.basic +
    + PAMI.uncertainPeriodicFrequentPattern.basic.abstract +
    + PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowth +
    + PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowthPlus +
    + PAMI.weightedFrequentNeighbourhoodPattern +
    + PAMI.weightedFrequentNeighbourhoodPattern.basic +
    + PAMI.weightedFrequentNeighbourhoodPattern.basic.abstract +
    + PAMI.weightedFrequentNeighbourhoodPattern.basic.SWFPGrowth +
    + PAMI.weightedFrequentPattern +
    + PAMI.weightedFrequentPattern.basic +
    + PAMI.weightedFrequentPattern.basic.abstract +
    + PAMI.weightedFrequentPattern.basic.WFIM +
    + PAMI.weightedFrequentRegularPattern +
    + PAMI.weightedFrequentRegularPattern.basic +
    + PAMI.weightedFrequentRegularPattern.basic.abstract +
    + PAMI.weightedFrequentRegularPattern.basic.WFRIMiner +
    + PAMI.weightedUncertainFrequentPattern +
    + PAMI.weightedUncertainFrequentPattern.basic +
    + PAMI.weightedUncertainFrequentPattern.basic.abstract +
    + PAMI.weightedUncertainFrequentPattern.basic.WUFIM +
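The module index above enumerates the mining algorithms these generated pages document, and the search index further below shows that they share one API surface (startMine/mine, save, getPatterns, getPatternsAsDataFrame, getMemoryUSS, getMemoryRSS, getRuntime, printResults). As a minimal sketch of that shared usage pattern, assuming PAMI is installed and taking FPGrowth as a representative module; the input file name and support threshold here are hypothetical placeholders, not values from this diff:

    # Hedged sketch of the common PAMI algorithm API; only the method names
    # below appear in the generated index -- 'sampleDB.txt' and minSup=10
    # are illustrative placeholders.
    from PAMI.frequentPattern.basic import FPGrowth as alg

    obj = alg.FPGrowth(iFile='sampleDB.txt', minSup=10, sep='\t')
    obj.mine()                                # startMine() in older releases
    print('Patterns found:', len(obj.getPatterns()))
    obj.save('frequentPatterns.txt')          # persist patterns to a file
    df = obj.getPatternsAsDataFrame()         # pattern/support pairs as a DataFrame
    print('Runtime (s):', obj.getRuntime())
    print('Memory (RSS):', obj.getMemoryRSS())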
\ No newline at end of file diff --git a/sphinx/_build/html/search.html b/sphinx/_build/html/search.html new file mode 100644 index 000000000..7331fd1c3 --- /dev/null +++ b/sphinx/_build/html/search.html @@ -0,0 +1,120 @@ + Search — PAMI 2024.04.23 documentation
+ + + + + + + + + \ No newline at end of file diff --git a/sphinx/_build/html/searchindex.js b/sphinx/_build/html/searchindex.js new file mode 100644 index 000000000..4fe1f22f2 --- /dev/null +++ b/sphinx/_build/html/searchindex.js @@ -0,0 +1 @@ +Search.setIndex({"docnames": ["PAMI", "PAMI.AssociationRules", "PAMI.AssociationRules.basic", "PAMI.correlatedPattern", "PAMI.correlatedPattern.basic", "PAMI.coveragePattern", "PAMI.coveragePattern.basic", "PAMI.extras", "PAMI.extras.DF2DB", "PAMI.extras.calculateMISValues", "PAMI.extras.dbStats", "PAMI.extras.fuzzyTransformation", "PAMI.extras.generateDatabase", "PAMI.extras.graph", "PAMI.extras.image2Database", "PAMI.extras.imageProcessing", "PAMI.extras.messaging", "PAMI.extras.neighbours", "PAMI.extras.sampleDatasets", "PAMI.extras.stats", "PAMI.extras.syntheticDataGenerator", "PAMI.extras.visualize", "PAMI.faultTolerantFrequentPattern", "PAMI.faultTolerantFrequentPattern.basic", "PAMI.frequentPattern", "PAMI.frequentPattern.basic", "PAMI.frequentPattern.closed", "PAMI.frequentPattern.cuda", "PAMI.frequentPattern.maximal", "PAMI.frequentPattern.pyspark", "PAMI.frequentPattern.topk", "PAMI.fuzzyCorrelatedPattern", "PAMI.fuzzyCorrelatedPattern.basic", "PAMI.fuzzyFrequentPattern", "PAMI.fuzzyFrequentPattern.basic", "PAMI.fuzzyGeoreferencedFrequentPattern", "PAMI.fuzzyGeoreferencedFrequentPattern.basic", "PAMI.fuzzyGeoreferencedPeriodicFrequentPattern", "PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic", "PAMI.fuzzyPartialPeriodicPatterns", "PAMI.fuzzyPartialPeriodicPatterns.basic", "PAMI.fuzzyPeriodicFrequentPattern", "PAMI.fuzzyPeriodicFrequentPattern.basic", "PAMI.geoReferencedPeriodicFrequentPattern", "PAMI.geoReferencedPeriodicFrequentPattern.basic", "PAMI.georeferencedFrequentPattern", "PAMI.georeferencedFrequentPattern.basic", "PAMI.georeferencedFrequentSequencePattern", "PAMI.georeferencedPartialPeriodicPattern", "PAMI.georeferencedPartialPeriodicPattern.basic", "PAMI.highUtilityFrequentPattern", "PAMI.highUtilityFrequentPattern.basic", "PAMI.highUtilityGeoreferencedFrequentPattern", "PAMI.highUtilityGeoreferencedFrequentPattern.basic", "PAMI.highUtilityPattern", "PAMI.highUtilityPattern.basic", "PAMI.highUtilityPattern.parallel", "PAMI.highUtilityPatternsInStreams", "PAMI.highUtilitySpatialPattern", "PAMI.highUtilitySpatialPattern.basic", "PAMI.highUtilitySpatialPattern.topk", "PAMI.localPeriodicPattern", "PAMI.localPeriodicPattern.basic", "PAMI.multipleMinimumSupportBasedFrequentPattern", "PAMI.multipleMinimumSupportBasedFrequentPattern.basic", "PAMI.partialPeriodicFrequentPattern", "PAMI.partialPeriodicFrequentPattern.basic", "PAMI.partialPeriodicPattern", "PAMI.partialPeriodicPattern.basic", "PAMI.partialPeriodicPattern.closed", "PAMI.partialPeriodicPattern.maximal", "PAMI.partialPeriodicPattern.pyspark", "PAMI.partialPeriodicPattern.topk", "PAMI.partialPeriodicPatternInMultipleTimeSeries", "PAMI.periodicCorrelatedPattern", "PAMI.periodicCorrelatedPattern.basic", "PAMI.periodicFrequentPattern", "PAMI.periodicFrequentPattern.basic", "PAMI.periodicFrequentPattern.closed", "PAMI.periodicFrequentPattern.cuda", "PAMI.periodicFrequentPattern.maximal", "PAMI.periodicFrequentPattern.pyspark", "PAMI.periodicFrequentPattern.topk", "PAMI.periodicFrequentPattern.topk.TopkPFP", "PAMI.periodicFrequentPattern.topk.kPFPMiner", "PAMI.recurringPattern", "PAMI.recurringPattern.basic", "PAMI.relativeFrequentPattern", "PAMI.relativeFrequentPattern.basic", "PAMI.relativeHighUtilityPattern", "PAMI.relativeHighUtilityPattern.basic", "PAMI.sequence", 
"PAMI.sequentialPatternMining", "PAMI.sequentialPatternMining.basic", "PAMI.sequentialPatternMining.closed", "PAMI.stablePeriodicFrequentPattern", "PAMI.stablePeriodicFrequentPattern.basic", "PAMI.stablePeriodicFrequentPattern.topK", "PAMI.subgraphMining", "PAMI.subgraphMining.basic", "PAMI.subgraphMining.topK", "PAMI.uncertainFaultTolerantFrequentPattern", "PAMI.uncertainFrequentPattern", "PAMI.uncertainFrequentPattern.basic", "PAMI.uncertainGeoreferencedFrequentPattern", "PAMI.uncertainGeoreferencedFrequentPattern.basic", "PAMI.uncertainPeriodicFrequentPattern", "PAMI.uncertainPeriodicFrequentPattern.basic", "PAMI.weightedFrequentNeighbourhoodPattern", "PAMI.weightedFrequentNeighbourhoodPattern.basic", "PAMI.weightedFrequentPattern", "PAMI.weightedFrequentPattern.basic", "PAMI.weightedFrequentRegularPattern", "PAMI.weightedFrequentRegularPattern.basic", "PAMI.weightedUncertainFrequentPattern", "PAMI.weightedUncertainFrequentPattern.basic", "index", "modules"], "filenames": ["PAMI.rst", "PAMI.AssociationRules.rst", "PAMI.AssociationRules.basic.rst", "PAMI.correlatedPattern.rst", "PAMI.correlatedPattern.basic.rst", "PAMI.coveragePattern.rst", "PAMI.coveragePattern.basic.rst", "PAMI.extras.rst", "PAMI.extras.DF2DB.rst", "PAMI.extras.calculateMISValues.rst", "PAMI.extras.dbStats.rst", "PAMI.extras.fuzzyTransformation.rst", "PAMI.extras.generateDatabase.rst", "PAMI.extras.graph.rst", "PAMI.extras.image2Database.rst", "PAMI.extras.imageProcessing.rst", "PAMI.extras.messaging.rst", "PAMI.extras.neighbours.rst", "PAMI.extras.sampleDatasets.rst", "PAMI.extras.stats.rst", "PAMI.extras.syntheticDataGenerator.rst", "PAMI.extras.visualize.rst", "PAMI.faultTolerantFrequentPattern.rst", "PAMI.faultTolerantFrequentPattern.basic.rst", "PAMI.frequentPattern.rst", "PAMI.frequentPattern.basic.rst", "PAMI.frequentPattern.closed.rst", "PAMI.frequentPattern.cuda.rst", "PAMI.frequentPattern.maximal.rst", "PAMI.frequentPattern.pyspark.rst", "PAMI.frequentPattern.topk.rst", "PAMI.fuzzyCorrelatedPattern.rst", "PAMI.fuzzyCorrelatedPattern.basic.rst", "PAMI.fuzzyFrequentPattern.rst", "PAMI.fuzzyFrequentPattern.basic.rst", "PAMI.fuzzyGeoreferencedFrequentPattern.rst", "PAMI.fuzzyGeoreferencedFrequentPattern.basic.rst", "PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.rst", "PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.rst", "PAMI.fuzzyPartialPeriodicPatterns.rst", "PAMI.fuzzyPartialPeriodicPatterns.basic.rst", "PAMI.fuzzyPeriodicFrequentPattern.rst", "PAMI.fuzzyPeriodicFrequentPattern.basic.rst", "PAMI.geoReferencedPeriodicFrequentPattern.rst", "PAMI.geoReferencedPeriodicFrequentPattern.basic.rst", "PAMI.georeferencedFrequentPattern.rst", "PAMI.georeferencedFrequentPattern.basic.rst", "PAMI.georeferencedFrequentSequencePattern.rst", "PAMI.georeferencedPartialPeriodicPattern.rst", "PAMI.georeferencedPartialPeriodicPattern.basic.rst", "PAMI.highUtilityFrequentPattern.rst", "PAMI.highUtilityFrequentPattern.basic.rst", "PAMI.highUtilityGeoreferencedFrequentPattern.rst", "PAMI.highUtilityGeoreferencedFrequentPattern.basic.rst", "PAMI.highUtilityPattern.rst", "PAMI.highUtilityPattern.basic.rst", "PAMI.highUtilityPattern.parallel.rst", "PAMI.highUtilityPatternsInStreams.rst", "PAMI.highUtilitySpatialPattern.rst", "PAMI.highUtilitySpatialPattern.basic.rst", "PAMI.highUtilitySpatialPattern.topk.rst", "PAMI.localPeriodicPattern.rst", "PAMI.localPeriodicPattern.basic.rst", "PAMI.multipleMinimumSupportBasedFrequentPattern.rst", "PAMI.multipleMinimumSupportBasedFrequentPattern.basic.rst", 
"PAMI.partialPeriodicFrequentPattern.rst", "PAMI.partialPeriodicFrequentPattern.basic.rst", "PAMI.partialPeriodicPattern.rst", "PAMI.partialPeriodicPattern.basic.rst", "PAMI.partialPeriodicPattern.closed.rst", "PAMI.partialPeriodicPattern.maximal.rst", "PAMI.partialPeriodicPattern.pyspark.rst", "PAMI.partialPeriodicPattern.topk.rst", "PAMI.partialPeriodicPatternInMultipleTimeSeries.rst", "PAMI.periodicCorrelatedPattern.rst", "PAMI.periodicCorrelatedPattern.basic.rst", "PAMI.periodicFrequentPattern.rst", "PAMI.periodicFrequentPattern.basic.rst", "PAMI.periodicFrequentPattern.closed.rst", "PAMI.periodicFrequentPattern.cuda.rst", "PAMI.periodicFrequentPattern.maximal.rst", "PAMI.periodicFrequentPattern.pyspark.rst", "PAMI.periodicFrequentPattern.topk.rst", "PAMI.periodicFrequentPattern.topk.TopkPFP.rst", "PAMI.periodicFrequentPattern.topk.kPFPMiner.rst", "PAMI.recurringPattern.rst", "PAMI.recurringPattern.basic.rst", "PAMI.relativeFrequentPattern.rst", "PAMI.relativeFrequentPattern.basic.rst", "PAMI.relativeHighUtilityPattern.rst", "PAMI.relativeHighUtilityPattern.basic.rst", "PAMI.sequence.rst", "PAMI.sequentialPatternMining.rst", "PAMI.sequentialPatternMining.basic.rst", "PAMI.sequentialPatternMining.closed.rst", "PAMI.stablePeriodicFrequentPattern.rst", "PAMI.stablePeriodicFrequentPattern.basic.rst", "PAMI.stablePeriodicFrequentPattern.topK.rst", "PAMI.subgraphMining.rst", "PAMI.subgraphMining.basic.rst", "PAMI.subgraphMining.topK.rst", "PAMI.uncertainFaultTolerantFrequentPattern.rst", "PAMI.uncertainFrequentPattern.rst", "PAMI.uncertainFrequentPattern.basic.rst", "PAMI.uncertainGeoreferencedFrequentPattern.rst", "PAMI.uncertainGeoreferencedFrequentPattern.basic.rst", "PAMI.uncertainPeriodicFrequentPattern.rst", "PAMI.uncertainPeriodicFrequentPattern.basic.rst", "PAMI.weightedFrequentNeighbourhoodPattern.rst", "PAMI.weightedFrequentNeighbourhoodPattern.basic.rst", "PAMI.weightedFrequentPattern.rst", "PAMI.weightedFrequentPattern.basic.rst", "PAMI.weightedFrequentRegularPattern.rst", "PAMI.weightedFrequentRegularPattern.basic.rst", "PAMI.weightedUncertainFrequentPattern.rst", "PAMI.weightedUncertainFrequentPattern.basic.rst", "index.rst", "modules.rst"], "titles": ["PAMI package", "PAMI.AssociationRules package", "PAMI.AssociationRules.basic package", "PAMI.correlatedPattern package", "PAMI.correlatedPattern.basic package", "PAMI.coveragePattern package", "PAMI.coveragePattern.basic package", "PAMI.extras package", "PAMI.extras.DF2DB package", "PAMI.extras.calculateMISValues package", "PAMI.extras.dbStats package", "PAMI.extras.fuzzyTransformation package", "PAMI.extras.generateDatabase package", "PAMI.extras.graph package", "PAMI.extras.image2Database package", "PAMI.extras.imageProcessing package", "PAMI.extras.messaging package", "PAMI.extras.neighbours package", "PAMI.extras.sampleDatasets package", "PAMI.extras.stats package", "PAMI.extras.syntheticDataGenerator package", "PAMI.extras.visualize package", "PAMI.faultTolerantFrequentPattern package", "PAMI.faultTolerantFrequentPattern.basic package", "PAMI.frequentPattern package", "PAMI.frequentPattern.basic package", "PAMI.frequentPattern.closed package", "PAMI.frequentPattern.cuda package", "PAMI.frequentPattern.maximal package", "PAMI.frequentPattern.pyspark package", "PAMI.frequentPattern.topk package", "PAMI.fuzzyCorrelatedPattern package", "PAMI.fuzzyCorrelatedPattern.basic package", "PAMI.fuzzyFrequentPattern package", "PAMI.fuzzyFrequentPattern.basic package", "PAMI.fuzzyGeoreferencedFrequentPattern package", 
"PAMI.fuzzyGeoreferencedFrequentPattern.basic package", "PAMI.fuzzyGeoreferencedPeriodicFrequentPattern package", "PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic package", "PAMI.fuzzyPartialPeriodicPatterns package", "PAMI.fuzzyPartialPeriodicPatterns.basic package", "PAMI.fuzzyPeriodicFrequentPattern package", "PAMI.fuzzyPeriodicFrequentPattern.basic package", "PAMI.geoReferencedPeriodicFrequentPattern package", "PAMI.geoReferencedPeriodicFrequentPattern.basic package", "PAMI.georeferencedFrequentPattern package", "PAMI.georeferencedFrequentPattern.basic package", "PAMI.georeferencedFrequentSequencePattern package", "PAMI.georeferencedPartialPeriodicPattern package", "PAMI.georeferencedPartialPeriodicPattern.basic package", "PAMI.highUtilityFrequentPattern package", "PAMI.highUtilityFrequentPattern.basic package", "PAMI.highUtilityGeoreferencedFrequentPattern package", "PAMI.highUtilityGeoreferencedFrequentPattern.basic package", "PAMI.highUtilityPattern package", "PAMI.highUtilityPattern.basic package", "PAMI.highUtilityPattern.parallel package", "PAMI.highUtilityPatternsInStreams package", "PAMI.highUtilitySpatialPattern package", "PAMI.highUtilitySpatialPattern.basic package", "PAMI.highUtilitySpatialPattern.topk package", "PAMI.localPeriodicPattern package", "PAMI.localPeriodicPattern.basic package", "PAMI.multipleMinimumSupportBasedFrequentPattern package", "PAMI.multipleMinimumSupportBasedFrequentPattern.basic package", "PAMI.partialPeriodicFrequentPattern package", "PAMI.partialPeriodicFrequentPattern.basic package", "PAMI.partialPeriodicPattern package", "PAMI.partialPeriodicPattern.basic package", "PAMI.partialPeriodicPattern.closed package", "PAMI.partialPeriodicPattern.maximal package", "PAMI.partialPeriodicPattern.pyspark package", "PAMI.partialPeriodicPattern.topk package", "PAMI.partialPeriodicPatternInMultipleTimeSeries package", "PAMI.periodicCorrelatedPattern package", "PAMI.periodicCorrelatedPattern.basic package", "PAMI.periodicFrequentPattern package", "PAMI.periodicFrequentPattern.basic package", "PAMI.periodicFrequentPattern.closed package", "PAMI.periodicFrequentPattern.cuda package", "PAMI.periodicFrequentPattern.maximal package", "PAMI.periodicFrequentPattern.pyspark package", "PAMI.periodicFrequentPattern.topk package", "PAMI.periodicFrequentPattern.topk.TopkPFP package", "PAMI.periodicFrequentPattern.topk.kPFPMiner package", "PAMI.recurringPattern package", "PAMI.recurringPattern.basic package", "PAMI.relativeFrequentPattern package", "PAMI.relativeFrequentPattern.basic package", "PAMI.relativeHighUtilityPattern package", "PAMI.relativeHighUtilityPattern.basic package", "PAMI.sequence package", "PAMI.sequentialPatternMining package", "PAMI.sequentialPatternMining.basic package", "PAMI.sequentialPatternMining.closed package", "PAMI.stablePeriodicFrequentPattern package", "PAMI.stablePeriodicFrequentPattern.basic package", "PAMI.stablePeriodicFrequentPattern.topK package", "PAMI.subgraphMining package", "PAMI.subgraphMining.basic package", "PAMI.subgraphMining.topK package", "PAMI.uncertainFaultTolerantFrequentPattern package", "PAMI.uncertainFrequentPattern package", "PAMI.uncertainFrequentPattern.basic package", "PAMI.uncertainGeoreferencedFrequentPattern package", "PAMI.uncertainGeoreferencedFrequentPattern.basic package", "PAMI.uncertainPeriodicFrequentPattern package", "PAMI.uncertainPeriodicFrequentPattern.basic package", "PAMI.weightedFrequentNeighbourhoodPattern package", "PAMI.weightedFrequentNeighbourhoodPattern.basic package", 
"PAMI.weightedFrequentPattern package", "PAMI.weightedFrequentPattern.basic package", "PAMI.weightedFrequentRegularPattern package", "PAMI.weightedFrequentRegularPattern.basic package", "PAMI.weightedUncertainFrequentPattern package", "PAMI.weightedUncertainFrequentPattern.basic package", "Welcome to PAMI\u2019s documentation!", "PAMI"], "terms": {"associationrul": [0, 117], "basic": [0, 1, 3, 5, 8, 22, 24, 28, 31, 33, 35, 37, 39, 41, 43, 45, 48, 50, 52, 54, 58, 61, 63, 65, 67, 69, 73, 74, 76, 78, 80, 85, 87, 89, 92, 95, 97, 98, 101, 102, 104, 106, 108, 110, 112, 114], "submodul": [0, 1, 3, 5, 22, 24, 31, 33, 35, 37, 39, 41, 43, 45, 48, 50, 52, 54, 61, 63, 65, 67, 74, 76, 82, 85, 87, 89, 92, 95, 98, 102, 104, 106, 108, 110, 112, 114, 117], "arwithconfid": [0, 1], "arwithleverag": [0, 1], "arwithlift": [0, 1], "rulemin": [0, 1], "abstract": [0, 1, 3, 5, 7, 22, 24, 31, 33, 35, 37, 39, 41, 43, 45, 48, 50, 52, 54, 61, 63, 65, 67, 74, 76, 82, 85, 87, 89, 92, 95, 98, 102, 104, 106, 108, 110, 112, 114, 117], "correlatedpattern": [0, 117], "comin": [0, 3], "comineplu": [0, 3], "coveragepattern": [0, 117], "cmine": [0, 5], "cppg": [0, 5], "extra": [0, 117], "df2db": [0, 7], "df2dbplu": [0, 7], "denseformatdf": [0, 7], "sparseformatdf": [0, 7], "createtdb": [0, 7], "densedf2dbplu": [0, 7], "densedf2db_dump": [0, 7], "sparsedf2dbplu": [0, 7], "calculatemisvalu": [0, 7], "usingbeta": [0, 7], "usingsd": [0, 7], "dbstat": [0, 7, 19], "fuzzydatabas": [0, 7], "multipletimeseriesfuzzydatabasestat": [0, 7], "sequentialdatabas": [0, 7], "temporaldatabas": [0, 7, 12], "transactionaldatabas": [0, 7], "uncertaintemporaldatabas": [0, 7], "uncertaintransactionaldatabas": [0, 7], "utilitydatabas": [0, 7], "fuzzytransform": [0, 7], "temporaltofuzzi": [0, 7], "transactionaltofuzzi": [0, 7], "utilitytofuzzi": [0, 7], "generatedatabas": [0, 7], "generatespatiotemporaldatabas": [0, 7], "generatetemporaldatabas": [0, 7], "generatetransactionaldatabas": [0, 7], "graph": [0, 7, 10, 98], "df2fig": [0, 7], "df2tex": [0, 7], "plotlinegraphfromdictionari": [0, 7], "plotlinegraphsfromdatafram": [0, 7], "visualizefuzzypattern": [0, 7], "visualizepattern": [0, 7], "image2databas": [0, 7], "imageprocess": [0, 7], "imagery2databas": [0, 7], "messag": [0, 7], "discord": [0, 7], "gmail": [0, 7], "neighbour": [0, 7, 36, 38, 44, 46, 49, 53, 55, 58, 59, 60], "findneighborsusingeuclideandistanceforpointinfo": [0, 7], "findneighboursusingeuclidean": [0, 7], "findneighboursusinggeodes": [0, 7], "sampledataset": [0, 7], "stat": [0, 7, 10, 105], "graphdatabas": [0, 7, 21], "syntheticdatagener": [0, 7], "createsyntheticgeoreferentialtempor": [0, 7], "createsyntheticgeoreferentialtransact": [0, 7], "createsyntheticgeoreferentialuncertaintransact": [0, 7], "createsynthetictempor": [0, 7], "createsynthetictransact": [0, 7], "createsyntheticuncertaintempor": [0, 7], "createsyntheticuncertaintransact": [0, 7], "createsyntheticutil": [0, 7], "generatetempor": [0, 7], "generatetransact": [0, 7], "generateuncertaintempor": [0, 7], "generateuncertaintransact": [0, 7], "generateutilitytempor": [0, 7], "generateutilitytransact": [0, 7], "georeferencedtemporaldatabas": [0, 7], "georeferencedtransactionaldatabas": [0, 7], "syntheticutilitydatabas": [0, 7], "temporaldatabasegen": [0, 7], "visual": [0, 7, 13], "convertmultitsintofuzzi": [0, 117], "generatelatexgraphfil": [0, 117], "generatelatexcod": [0, 7, 37, 38], "plotpointonmap": [0, 117], "convertpoint": [0, 7], "findtopkpattern": [0, 7], "plotpointinmap": [0, 7], "plotpointonmap_dump": [0, 117], 
"scatterplotspatialpoint": [0, 117], "topkpattern": [0, 117], "gettopkpattern": [0, 7], "save": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 15, 17, 19, 20, 22, 23, 24, 25, 26, 28, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 48, 49, 50, 51, 52, 53, 54, 55, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 72, 73, 74, 75, 76, 77, 78, 80, 82, 83, 84, 85, 86, 87, 88, 89, 90, 92, 93, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115], "uncertaindb_convert": [0, 117], "predictedclass2transact": [0, 7], "getbinarytransact": [0, 7], "faulttolerantfrequentpattern": [0, 101, 117], "ftapriori": [0, 22], "ftfpgrowth": [0, 22], "frequentpattern": [0, 2, 8, 59, 64, 88, 90, 93, 103, 109, 111, 117], "apriori": [0, 23, 24, 46, 101], "eclat": [0, 24, 46], "eclatdiffset": [0, 24], "eclatbitset": [0, 24], "fpgrowth": [0, 8, 23, 24], "close": [0, 24, 55, 59, 67, 76, 77, 92], "charm": [0, 24], "cuda": [0, 24, 76], "cuapriori": [0, 24], "cuaprioribit": [0, 24], "cueclat": [0, 24], "cueclatbit": [0, 24], "cudaapriorigct": [0, 24], "cudaaprioritid": [0, 24], "cudaeclatgct": [0, 24], "maxim": [0, 24, 62, 67, 76], "maxfpgrowth": [0, 24], "pyspark": [0, 24, 67, 76], "parallelapriori": [0, 24], "paralleleclat": [0, 24], "parallelfpgrowth": [0, 24], "topk": [0, 13, 24, 58, 67, 76, 95, 98], "fae": [0, 24], "fuzzycorrelatedpattern": [0, 117], "fcpgrowth": [0, 31], "fuzzyfrequentpattern": [0, 117], "ffimin": [0, 33], "ffiminer_old": [0, 33], "fuzzygeoreferencedfrequentpattern": [0, 117], "ffspminer": [0, 35, 38], "ffspminer_old": [0, 35], "fuzzygeoreferencedperiodicfrequentpattern": [0, 117], "fgpfpminer": [0, 37], "fgpfpminer_old": [0, 37], "fuzzypartialperiodicpattern": [0, 117], "f3pminer": [0, 39], "fuzzyperiodicfrequentpattern": [0, 117], "fpfpminer": [0, 41], "fpfpminer_old": [0, 41], "georeferencedperiodicfrequentpattern": [0, 117], "gpfpminer": [0, 43], "georeferencedfrequentpattern": [0, 117], "fspgrowth": [0, 45], "spatialeclat": [0, 45], "georeferencedfrequentsequencepattern": [0, 117], "georeferencedpartialperiodicpattern": [0, 117], "steclat": [0, 48], "highutilityfrequentpattern": [0, 117], "hufim": [0, 50], "highutilitygeoreferencedfrequentpattern": [0, 59, 117], "shufim": [0, 52], "highutilitypattern": [0, 117], "efim": [0, 54], "hminer": [0, 54], "upgrowth": [0, 54], "efimparallel": [0, 54], "parallel": [0, 54, 86], "highutilitypatternsinstream": [0, 117], "hupm": [0, 117], "shugrowth": [0, 117], "highutilityspatialpattern": [0, 117], "hdshuim": [0, 58], "shuim": [0, 58], "tkshuim": [0, 58], "utilitypattern": [0, 58, 60], "endtim": [0, 2, 4, 6, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "finalpattern": [0, 2, 4, 6, 11, 23, 25, 26, 28, 30, 44, 46, 49, 58, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "getmemoryrss": [0, 1, 2, 3, 4, 5, 6, 8, 22, 23, 24, 25, 26, 28, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 48, 49, 50, 51, 52, 53, 54, 55, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 72, 73, 74, 75, 76, 77, 78, 80, 82, 83, 84, 85, 86, 87, 88, 89, 90, 92, 93, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115], "getmemoryuss": [0, 1, 2, 3, 4, 5, 6, 8, 22, 23, 24, 25, 26, 28, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 48, 
49, 50, 51, 52, 53, 54, 55, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 72, 73, 74, 75, 76, 77, 78, 80, 82, 83, 84, 85, 86, 87, 88, 89, 90, 92, 93, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115], "getpattern": [0, 1, 2, 3, 4, 5, 6, 9, 22, 23, 24, 25, 26, 28, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 48, 49, 50, 51, 52, 53, 54, 55, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 72, 73, 74, 75, 76, 77, 78, 80, 82, 83, 84, 85, 86, 87, 88, 89, 90, 92, 93, 95, 96, 97, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115], "getpatternsasdatafram": [0, 1, 2, 3, 4, 5, 6, 22, 23, 24, 25, 26, 28, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 48, 49, 50, 51, 52, 53, 54, 55, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 72, 73, 74, 75, 76, 77, 78, 80, 82, 83, 84, 85, 86, 87, 88, 89, 90, 92, 93, 95, 96, 97, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115], "getruntim": [0, 1, 2, 3, 4, 5, 6, 8, 22, 23, 24, 25, 26, 28, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 48, 49, 50, 51, 52, 53, 54, 55, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 72, 73, 74, 75, 76, 77, 78, 80, 82, 83, 84, 85, 86, 87, 88, 89, 90, 92, 93, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115], "ifil": [0, 2, 4, 6, 7, 9, 10, 11, 13, 17, 19, 21, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 99, 100, 101, 103, 105, 107, 109, 111, 113, 115], "memoryrss": [0, 2, 4, 6, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "memoryuss": [0, 2, 4, 6, 23, 25, 26, 28, 30, 44, 46, 49, 58, 60, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "minutil": [0, 51, 53, 55, 58, 59, 60, 90], "nfile": [0, 36, 38, 44, 46, 49, 51, 53, 58, 59, 60, 105, 109], "ofil": [0, 2, 4, 6, 7, 8, 9, 10, 11, 12, 15, 17, 19, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 99, 100, 101, 103, 105, 107, 109, 111, 113, 115], "startmin": [0, 1, 2, 3, 4, 5, 6, 22, 23, 24, 25, 26, 28, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 48, 49, 50, 51, 52, 53, 54, 55, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 72, 73, 74, 75, 76, 77, 78, 80, 82, 83, 84, 85, 86, 87, 88, 89, 90, 92, 93, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115], "starttim": [0, 2, 4, 6, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "localperiodicpattern": [0, 117], "lppgrowth": [0, 61], "lppmbreadth": [0, 61], "lppmdepth": [0, 61], "multipleminimumsupportbasedfrequentpattern": [0, 117], "cfpgrowth": [0, 63], "cfpgrowthplu": [0, 63], "partialperiodicfrequentpattern": [0, 117], "gpfgrowth": [0, 65], "ppf_df": [0, 65], "partialperiodicpattern": [0, 65, 66, 117], "gthreepgrowth": [0, 67], "gabstract": [0, 67], "pppgrowth": [0, 67], "ppp_eclat": [0, 67], "pppclose": [0, 67], "max3pgrowth": [0, 67], "parallel3pgrowth": [0, 67], "k3pminer": [0, 67], 
"partialperiodicpatterninmultipletimeseri": [0, 117], "ppgrowth": [0, 117], "mine": [0, 1, 2, 3, 4, 5, 6, 9, 11, 17, 22, 23, 24, 25, 26, 28, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 48, 49, 50, 51, 52, 53, 54, 55, 58, 59, 60, 61, 62, 63, 64, 66, 67, 68, 69, 72, 73, 75, 76, 77, 78, 80, 82, 83, 84, 85, 86, 87, 88, 90, 92, 93, 95, 96, 97, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115], "printresult": [0, 1, 2, 3, 4, 5, 6, 22, 23, 24, 25, 26, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 48, 49, 50, 51, 52, 53, 54, 55, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 72, 73, 74, 75, 76, 77, 78, 80, 82, 83, 84, 85, 86, 87, 88, 89, 90, 92, 93, 95, 96, 97, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115], "periodiccorrelatedpattern": [0, 117], "epcpgrowth": [0, 74], "periodicfrequentpattern": [0, 42, 68, 69, 73, 86, 107, 117], "pfeclat": [0, 76], "pfpgrowth": [0, 75, 76], "pfpgrowthplu": [0, 76], "pfpmc": [0, 76], "psgrowth": [0, 76], "parallelpfpgrowth": [0, 76], "cpfpminer": [0, 76], "cugpfmin": [0, 76], "gpfminerbit": [0, 76], "maxpfgrowth": [0, 76], "recurringpattern": [0, 117], "rpgrowth": [0, 85], "relativefrequentpattern": [0, 117], "rsfpgrowth": [0, 87], "relativehighutilitypattern": [0, 117], "rhuim": [0, 89], "sequenc": [0, 10, 19, 51, 53, 62, 93, 117], "sequentialpatternmin": [0, 117], "spade": [0, 92], "spam": [0, 92], "prefixspan": [0, 92], "bide": [0, 92], "stableperiodicfrequentpattern": [0, 117], "sppeclat": [0, 95], "sppgrowth": [0, 95], "sppgrowthdump": [0, 95], "tspin": [0, 95], "subgraphmin": [0, 117], "dfscode": [0, 98], "edg": [0, 98], "extendededg": [0, 98], "frequentsubgraph": [0, 98], "gspan": [0, 98, 100], "sparsetriangularmatrix": [0, 98], "vertex": [0, 98], "dfsthread": [0, 98], "tkg": [0, 98], "uncertainfaulttolerantfrequentpattern": [0, 117], "vbftmine": [0, 117], "uncertainfrequentpattern": [0, 117], "cufptre": [0, 102], "pufgrowth": [0, 102], "tufp": [0, 102], "tubep": [0, 102], "tube": [0, 102], "ufgrowth": [0, 102], "uveclat": [0, 102], "uncertaingeoreferencedfrequentpattern": [0, 117], "gfpgrowth": [0, 104], "uncertainperiodicfrequentpattern": [0, 117], "upfpgrowth": [0, 106], "upfpgrowthplu": [0, 106], "weightedfrequentneighbourhoodpattern": [0, 117], "swfpgrowth": [0, 108], "weightedfrequentpattern": [0, 117], "wfim": [0, 110], "weightedfrequentregularpattern": [0, 117], "wfrimin": [0, 112], "weighteduncertainfrequentpattern": [0, 117], "wufim": [0, 114], "A": [0, 4, 6, 20, 23, 25, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 77, 90, 93, 107], "pattern": [0, 2, 4, 6, 7, 8, 9, 11, 13, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 99, 100, 101, 103, 105, 107, 109, 111, 113, 115], "python": [0, 2, 4, 68, 72, 78, 83, 99, 100], "librari": 0, "lift": [1, 2], "run": [1, 2, 4, 6, 7, 8, 25, 26, 28, 30, 77, 78, 83, 86, 88, 90, 96, 97, 98, 99, 100, 103, 107, 109, 111, 113, 115], "confid": [1, 2, 4], "leverag": [1, 2], "class": [2, 4, 6, 7, 8, 9, 10, 11, 12, 13, 15, 16, 17, 19, 20, 21, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 99, 100, 101, 103, 105, 107, 109, 111, 113, 115], "minconf": 2, "sep": [2, 4, 6, 7, 9, 10, 11, 12, 15, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 
68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "sourc": [2, 4, 6, 7, 8, 9, 10, 11, 12, 13, 15, 16, 17, 19, 20, 21, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 99, 100, 101, 103, 105, 107, 109, 111, 113, 115], "base": [2, 4, 6, 7, 8, 9, 10, 11, 12, 13, 15, 16, 17, 19, 20, 21, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 99, 100, 101, 103, 105, 107, 109, 111, 113, 115], "object": [2, 7, 8, 9, 10, 12, 13, 15, 16, 17, 19, 20, 21, 32, 60, 62, 77, 99, 100], "descript": [2, 4, 6, 7, 8, 9, 10, 11, 12, 13, 15, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "associ": [2, 25, 46, 55, 64], "rule": [2, 25, 32, 46, 64], "ar": [2, 8, 26, 34, 36, 38, 42, 51, 53, 55, 59, 60, 62, 68, 77, 80, 90, 93, 96, 99, 103, 105, 107, 115], "deriv": 2, "from": [2, 4, 6, 7, 8, 9, 10, 11, 12, 13, 15, 17, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 99, 100, 101, 103, 105, 107, 109, 111, 113, 115], "frequent": [2, 6, 8, 9, 11, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 99, 100, 101, 103, 105, 107, 109, 111, 113, 115], "us": [2, 4, 6, 7, 9, 10, 11, 12, 13, 15, 17, 19, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 99, 101, 103, 105, 107, 109, 111, 113, 115], "metric": [2, 100], "refer": [2, 4, 6, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 99, 101, 103, 105, 107, 109, 111, 113, 115], "param": [2, 4, 6, 8, 9, 10, 12, 13, 17, 19, 20, 23, 25, 26, 28, 55, 60, 66, 73, 77, 78, 83, 90, 93, 99, 100], "str": [2, 4, 6, 7, 8, 9, 10, 11, 12, 13, 15, 16, 17, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "name": [2, 4, 6, 7, 8, 9, 10, 11, 12, 13, 17, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "input": [2, 4, 6, 7, 9, 10, 11, 12, 13, 17, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 99, 101, 103, 105, 107, 109, 111, 113, 115], "file": [2, 4, 6, 7, 8, 9, 10, 11, 12, 13, 17, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 99, 100, 101, 103, 105, 107, 109, 111, 113, 115], "complet": [2, 4, 6, 8, 9, 10, 11, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "set": [2, 4, 6, 7, 8, 9, 
11, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 99, 101, 103, 105, 107, 109, 111, 113, 115], "output": [2, 4, 6, 7, 8, 9, 10, 11, 12, 17, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 99, 100, 101, 103, 105, 107, 109, 111, 113, 115], "store": [2, 4, 6, 7, 8, 9, 10, 11, 13, 17, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 99, 101, 103, 105, 107, 109, 111, 113, 115], "float": [2, 4, 6, 7, 8, 10, 12, 15, 17, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "The": [2, 4, 6, 7, 9, 10, 11, 17, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 99, 100, 101, 103, 105, 107, 109, 111, 113, 115], "user": [2, 4, 6, 7, 8, 9, 11, 17, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "can": [2, 4, 6, 7, 9, 11, 12, 17, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "specifi": [2, 4, 9, 12, 13, 17, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 93, 96, 97, 99, 100, 101, 103, 105, 107, 109, 111, 113, 115], "between": [2, 4, 10, 19, 25, 62, 88, 99], "rang": [2, 4, 10, 19, 20, 32, 64, 103, 105, 107, 109, 111, 115], "0": [2, 4, 6, 7, 10, 12, 13, 19, 20, 23, 25, 26, 28, 32, 34, 36, 44, 46, 49, 55, 60, 62, 68, 69, 72, 73, 75, 77, 78, 80, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "1": [2, 4, 10, 19, 23, 26, 32, 36, 42, 59, 62, 64, 68, 77, 88, 93, 101, 111], "variabl": [2, 4, 6, 7, 9, 10, 11, 17, 19, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "i": [2, 4, 6, 7, 8, 9, 10, 11, 12, 13, 15, 17, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 99, 100, 101, 103, 105, 107, 109, 111, 113, 115], "distinguish": [2, 4, 6, 7, 9, 11, 17, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "item": [2, 4, 6, 7, 8, 9, 10, 11, 12, 15, 17, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "one": [2, 4, 6, 7, 9, 11, 17, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "anoth": [2, 4, 6, 7, 9, 11, 17, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 
44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 98, 99, 100, 101, 103, 105, 107, 109, 111, 113, 115], "transact": [2, 4, 6, 7, 8, 9, 10, 11, 12, 15, 17, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "default": [2, 4, 6, 7, 8, 9, 10, 11, 17, 19, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "seper": [2, 4, 6, 7, 9, 11, 12, 17, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "tab": [2, 4, 6, 7, 9, 10, 11, 17, 19, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "space": [2, 4, 6, 7, 9, 10, 11, 17, 19, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "howev": [2, 4, 6, 7, 9, 11, 17, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "overrid": [2, 4, 6, 7, 9, 11, 17, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 100, 101, 103, 105, 107, 109, 111, 113, 115], "separ": [2, 4, 6, 7, 9, 10, 11, 17, 19, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "attribut": [2, 4, 6, 7, 8, 10, 11, 12, 13, 17, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "To": [2, 4, 6, 8, 9, 10, 12, 13, 19, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "record": [2, 4, 6, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "start": [2, 4, 6, 13, 23, 25, 26, 28, 30, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 99, 100, 101, 103, 105, 107, 109, 111, 113, 115], "time": [2, 4, 6, 8, 10, 12, 19, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "process": [2, 4, 6, 7, 11, 15, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 99, 100, 101, 103, 105, 107, 109, 111, 113, 115], "dict": [2, 4, 6, 7, 10, 11, 13, 19, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 
59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "dictionari": [2, 4, 6, 7, 10, 11, 19, 23, 25, 26, 28, 30, 34, 44, 46, 49, 53, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 93, 96, 97, 99, 101, 103, 105, 107, 109, 111, 113, 115], "total": [2, 4, 6, 8, 10, 12, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "amount": [2, 4, 6, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "uss": [2, 4, 6, 8, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "memori": [2, 4, 6, 8, 13, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 100, 101, 103, 105, 107, 109, 111, 113, 115], "consum": [2, 4, 6, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "program": [2, 4, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 78, 80, 83, 84, 101, 105], "rss": [2, 4, 6, 8, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "termin": [2, 4, 73, 78], "command": [2, 4, 6, 73, 78], "format": [2, 4, 6, 10, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 99, 100, 101, 103, 105, 107, 109, 111, 113, 115], "venv": [2, 4, 6, 10, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 73, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 101, 103, 105, 107, 109, 111, 113, 115], "python3": [2, 4, 6, 10, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "py": [2, 4, 6, 10, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "inputfil": [2, 4, 6, 7, 10, 19, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "outputfil": [2, 4, 6, 7, 8, 10, 12, 15, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "exampl": [2, 4, 6, 10, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "usag": [2, 4, 6, 10, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 77, 80, 86, 88, 90, 93, 96, 100, 101, 
103, 105, 107, 109, 111, 113, 115], "sampledb": [2, 10, 19, 23, 25, 26, 28, 30, 62, 64, 68, 72, 77, 83, 84, 88, 93, 96, 101, 109, 111, 113], "txt": [2, 4, 6, 10, 12, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "5": [2, 4, 6, 12, 20, 44, 46, 49, 62, 77, 96, 113], "valu": [2, 4, 7, 8, 9, 10, 12, 13, 15, 17, 19, 20, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 66, 73, 75, 77, 86, 90, 93, 97, 99, 103, 105, 107, 109, 115], "call": [2, 4, 6, 58, 60, 62, 78, 93, 99], "import": [2, 4, 78, 83], "alg": [2, 4, 6, 10, 19, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "obj": [2, 4, 6, 7, 8, 9, 10, 11, 12, 13, 15, 17, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "print": [2, 4, 6, 8, 10, 12, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "number": [2, 4, 6, 10, 12, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "len": [2, 4, 6, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "df": [2, 4, 6, 8, 23, 25, 26, 28, 30, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 99, 101, 103, 105, 107, 109, 111, 113, 115], "getpatternindatafram": [2, 23, 25, 30, 64, 72, 83, 84, 93, 101, 113], "memuss": [2, 4, 6, 8, 23, 25, 26, 28, 30, 32, 34, 36, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 103, 105, 107, 109, 111, 113, 115], "memrss": [2, 4, 6, 8, 23, 25, 26, 28, 30, 32, 34, 36, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 103, 105, 107, 109, 111, 113, 115], "executiontim": [2, 4, 6, 8, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "second": [2, 4, 6, 8, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 99, 101, 103, 105, 107, 109, 111, 113, 115], "wa": [2, 4, 6, 10, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113], "written": [2, 4, 6, 10, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113], "p": [2, 6, 20, 23, 25, 26, 28, 30, 32, 36, 44, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 90, 96, 97, 101, 103, 105, 107, 109, 111, 113], "likhitha": [2, 6, 20, 23, 25, 26, 28, 30, 40, 49, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 96, 97, 101, 
103, 105, 107, 109, 111, 113], "under": [2, 4, 6, 10, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113], "supervis": [2, 4, 6, 10, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113], "professor": [2, 4, 6, 10, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113], "rage": [2, 4, 6, 10, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113], "udai": [2, 4, 6, 10, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113], "kiran": [2, 4, 6, 10, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113], "retriev": [2, 4, 6, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 99, 101, 103, 105, 107, 109, 111, 113, 115], "function": [2, 4, 6, 10, 17, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 99, 100, 101, 103, 105, 107, 109, 111, 113, 115], "return": [2, 4, 6, 7, 8, 9, 10, 12, 13, 17, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 99, 100, 101, 103, 105, 107, 109, 111, 113, 115], "type": [2, 4, 6, 8, 9, 10, 12, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 99, 101, 103, 105, 107, 109, 111, 113, 115], "send": [2, 4, 6, 7, 16, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 97, 101, 103, 105, 107, 109, 111, 113, 115], "after": [2, 4, 6, 12, 19, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "final": [2, 4, 6, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "datafram": [2, 4, 6, 7, 8, 9, 10, 12, 13, 15, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "pd": [2, 4, 6, 9, 12, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "calcul": [2, 4, 6, 9, 10, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 100, 
101, 103, 105, 107, 109, 111, 113, 115], "runtim": [2, 4, 6, 13, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 100, 101, 103, 105, 107, 109, 111, 113, 115], "taken": [2, 4, 6, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 100, 101, 103, 105, 107, 109, 111, 113, 115], "here": [2, 23, 25, 26, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "result": [2, 4, 6, 7, 10, 19, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 86, 88, 90, 93, 96, 97, 99, 101, 103, 107, 109, 111, 113, 115], "outfil": [2, 4, 6, 8, 9, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "load": [2, 4, 6, 8, 9, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "an": [2, 4, 6, 8, 9, 23, 25, 26, 30, 32, 34, 36, 38, 42, 44, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 72, 75, 77, 78, 88, 90, 93, 96, 97, 99, 101, 103, 105, 107, 109, 111, 113, 115], "paramet": [2, 4, 6, 7, 8, 9, 11, 12, 13, 15, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 99, 100, 101, 103, 105, 107, 109, 111, 113, 115], "10": [2, 4, 6, 7, 12, 17, 20, 23, 25, 26, 30, 32, 34, 36, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "none": [2, 4, 6, 7, 8, 9, 10, 11, 12, 13, 15, 16, 19, 20, 23, 25, 32, 34, 36, 42, 51, 55, 59, 60, 62, 64, 68, 75, 77, 80, 88, 90, 97, 99, 100, 103, 107, 109, 111, 113, 115], "singleitem": 2, "contain": [2, 13, 51, 53, 55, 59, 60, 90, 99], "its": [2, 9, 10, 19, 34, 36, 38, 42, 62, 93, 99], "support": [2, 4, 9, 26, 32, 34, 36, 38, 40, 42, 51, 53, 58, 64, 66, 68, 73, 75, 77, 80, 83, 86, 88, 93, 96, 97, 99, 100, 103, 105, 107, 109, 111, 113, 115], "list": [2, 4, 6, 7, 8, 10, 12, 15, 19, 20, 23, 25, 26, 28, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 72, 73, 75, 77, 80, 83, 84, 86, 88, 90, 93, 96, 97, 99, 100, 101, 103, 105, 107, 109, 111, 113, 115], "all": [2, 4, 6, 8, 10, 15, 19, 34, 36, 38, 40, 42, 51, 53, 55, 58, 59, 60, 62, 64, 66, 69, 72, 86, 88, 90, 96, 99, 105, 107], "singl": [2, 25, 51, 55, 60, 90, 100], "int": [2, 4, 6, 7, 8, 9, 10, 12, 13, 15, 17, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "minimum": [2, 4, 6, 7, 9, 10, 12, 19, 23, 30, 34, 36, 38, 40, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 77, 78, 80, 86, 88, 90, 93, 96, 99, 101, 103, 105, 107, 109, 111, 115], "satisfi": [2, 53, 59], "gener": [2, 4, 6, 7, 12, 20, 23, 25, 26, 32, 34, 36, 38, 40, 42, 44, 46, 49, 55, 58, 59, 60, 62, 66, 68, 72, 77, 83, 84, 93, 96, 99, 107], "combin": [2, 4, 25, 26, 46, 68, 72, 83, 84, 88], "threshold": [2, 7, 8, 9, 15, 59, 60, 64, 83, 90, 99, 103, 105, 107, 109, 111, 115], 
"measur": [2, 62, 90, 93], "code": [2, 9, 12, 15, 58, 66, 83, 99], "extract": [2, 23, 25, 26, 64, 68, 73, 75, 77, 80, 86, 88, 97, 103, 105, 107, 109, 111, 113, 115], "given": [2, 4, 7, 8, 9, 12, 13, 20, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 66, 88, 90, 99], "form": [2, 10, 25, 51, 68, 88, 90], "condit": [2, 8, 25, 55, 77, 88, 99], "strength": 2, "o": 2, "csv": [2, 4, 8, 9, 12, 20, 23, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 66, 68, 75, 77, 78, 80, 93, 96, 103, 105, 107, 109, 111, 113, 115], "minsup": [4, 6, 13, 23, 25, 26, 28, 32, 34, 36, 38, 40, 42, 44, 46, 51, 53, 55, 59, 64, 66, 73, 75, 77, 78, 80, 83, 88, 90, 93, 96, 99, 100, 101, 103, 105, 107, 109, 111, 115], "minallconf": [4, 32, 75], "t": [4, 6, 7, 10, 11, 12, 17, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "_correlatedpattern": 4, "fundament": [4, 23, 25, 28, 49, 64, 68, 73, 77, 80, 86, 93, 101, 103, 111, 113], "discov": [4, 6, 23, 25, 26, 28, 30, 32, 36, 40, 42, 46, 49, 53, 58, 59, 60, 64, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "correl": [4, 32, 75], "databas": [4, 6, 7, 8, 9, 10, 11, 12, 15, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 88, 90, 93, 96, 97, 99, 100, 101, 103, 105, 107, 109, 111, 113, 115], "It": [4, 7, 8, 10, 15, 19, 20, 23, 25, 26, 46, 59, 62, 66, 69, 72, 77, 78, 83, 84, 86, 93, 99, 103, 111, 113, 115], "tradit": [4, 32], "fp": [4, 8, 23, 25, 111], "growth": [4, 28, 55, 62, 77, 80, 93, 107], "depth": [4, 26, 62, 69, 78, 93, 99], "first": [4, 25, 26, 55, 60, 62, 69, 78, 90, 93, 99], "search": [4, 23, 25, 26, 34, 36, 38, 42, 62, 64, 69, 78, 93, 99, 101, 111, 113, 116], "techniqu": [4, 25, 26, 34, 36, 38, 42, 64, 93], "find": [4, 7, 17, 25, 26, 32, 34, 36, 38, 40, 42, 51, 55, 59, 60, 62, 64, 88, 93, 99, 100, 103, 105, 107, 109, 111, 115], "lee": [4, 77, 107], "y": [4, 12, 13, 23, 25, 51, 77, 84, 97, 99, 100], "k": [4, 7, 30, 36, 49, 51, 59, 60, 72, 73, 77, 83, 84, 90, 97, 100, 107, 109, 113], "kim": 4, "w": [4, 59, 96, 107, 113], "cao": 4, "d": 4, "han": [4, 23, 25, 64, 93], "j": [4, 23, 25, 26, 28, 51, 53, 55, 59, 60, 62, 64, 68, 69, 77, 90, 93, 96, 99, 100, 103, 107, 111], "2003": [4, 25], "effici": [4, 6, 26, 34, 36, 38, 42, 46, 51, 55, 77, 88, 93, 101, 107, 115], "In": [4, 10, 19, 25, 55, 93, 99, 101, 107, 115], "icdm": 4, "pp": [4, 25, 36, 42, 49, 59, 60, 73, 77, 90, 96, 109, 111, 113], "581": 4, "584": 4, "either": [4, 9, 20, 23, 25, 26, 28, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 68, 69, 72, 73, 75, 77, 78, 80, 86, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "count": [4, 9, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 90, 93, 96, 97, 100, 101, 103, 105, 107, 109, 111, 113, 115], "proport": [4, 9, 23, 25, 26, 28, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 68, 69, 72, 73, 75, 77, 78, 80, 86, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "size": [4, 9, 10, 12, 13, 19, 23, 25, 26, 28, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 68, 69, 72, 73, 75, 77, 78, 80, 86, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "If": [4, 9, 23, 25, 26, 28, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 68, 69, 72, 73, 75, 77, 78, 80, 86, 93, 
96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "detect": [4, 9, 23, 25, 26, 28, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 62, 68, 69, 72, 73, 75, 77, 78, 80, 86, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "data": [4, 7, 8, 9, 10, 11, 12, 13, 15, 19, 20, 23, 25, 26, 28, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 66, 68, 69, 72, 73, 75, 77, 78, 80, 84, 86, 90, 93, 96, 97, 99, 100, 101, 103, 105, 107, 109, 111, 113, 115], "integ": [4, 9, 10, 23, 25, 26, 28, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 68, 69, 72, 73, 75, 77, 78, 80, 86, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "treat": [4, 9, 23, 25, 26, 28, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 68, 69, 72, 73, 75, 77, 78, 80, 86, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "express": [4, 9, 23, 25, 26, 28, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 68, 69, 72, 73, 75, 77, 78, 80, 86, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "within": [4, 6, 32, 62, 77, 78, 80, 83, 88, 99], "ratio": [4, 32], "should": [4, 8, 32, 99], "mapsupport": [4, 23, 25, 26, 28, 64, 68, 73, 75, 77, 80, 86, 88, 96, 97, 103, 105, 107, 109, 111, 113, 115], "maintain": [4, 23, 25, 26, 28, 62, 64, 68, 73, 75, 77, 80, 86, 88, 96, 97, 103, 105, 107, 109, 111, 113, 115], "inform": [4, 6, 23, 25, 26, 28, 62, 64, 68, 73, 75, 77, 80, 83, 86, 88, 96, 97, 99, 100, 103, 105, 107, 109, 111, 113, 115], "frequenc": [4, 10, 19, 23, 25, 26, 28, 64, 68, 73, 75, 77, 80, 86, 88, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "lno": [4, 23, 25, 26, 28, 64, 68, 73, 75, 77, 80, 82, 84, 86, 88, 96, 97, 103, 105, 107, 109, 111, 113, 115], "repres": [4, 8, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 60, 62, 64, 68, 73, 75, 77, 80, 86, 88, 96, 97, 99, 100, 103, 105, 107, 109, 111, 113, 115], "tree": [4, 23, 25, 26, 28, 51, 53, 55, 59, 60, 61, 62, 64, 68, 73, 75, 77, 80, 86, 88, 90, 96, 97, 103, 105, 107, 109, 111, 113, 115], "itemsetcount": [4, 26, 28, 73, 75, 77, 80, 86, 88, 96, 97, 103, 105, 107, 115], "itemsetbuff": [4, 34, 36, 38, 40, 42, 88], "maxpatternlength": [4, 88], "constraint": [4, 53, 59, 88, 90, 93, 96], "length": [4, 7, 10, 12, 19, 20, 23, 55, 59, 62, 88, 93, 101, 103, 105, 107, 115], "sampletdb": [4, 6, 28, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 69, 73, 75, 77, 78, 80, 86, 90, 96, 97, 103, 105, 107, 115], "25": 4, "2": [4, 20, 28, 32, 34, 36, 38, 40, 42, 49, 53, 59, 60, 68, 69, 77, 78, 80, 86, 88, 93, 96, 109], "savepattern": [4, 25, 26, 28, 64, 84, 86, 90, 93, 97, 98, 100, 105, 107, 111], "b": [4, 32, 34, 36, 38, 42, 46, 55, 59, 68, 88, 93, 101], "sai": [4, 20, 32, 34, 36, 38, 42, 46, 55, 59, 88], "chitra": [4, 32, 34, 36, 38, 42, 46, 55, 59, 88], "tupl": [4, 6, 12, 23, 62, 77, 80], "main": [4, 6, 11, 23, 25, 30, 31, 32, 52, 53, 55, 58, 59, 60, 64, 68, 72, 77, 80, 83, 84, 88, 93, 100, 103, 105, 107, 109, 111, 113], "interv": [4, 8, 62], "which": [4, 6, 7, 34, 36, 38, 42, 44, 46, 51, 55, 62, 68, 77, 78, 80, 83, 88, 90, 93, 96, 99, 100], "higher": 4, "order": [4, 34, 40, 53, 59, 60, 99], "onli": [4, 20, 51, 53, 55, 59, 60, 90, 93], "have": [4, 55, 59, 60, 62, 93, 96, 99], "r": [4, 6, 25, 36, 42, 49, 51, 62, 64, 68, 69, 73, 77, 80, 84, 88, 90, 107, 109], "kitsuregawa": [4, 46, 49, 51, 69, 73, 80, 86, 88, 109], "m": [4, 12, 20, 40, 49, 51, 62, 73, 77, 90, 93, 107, 109], "2012": [4, 103], "discoveri": [4, 23, 25, 55, 77, 88, 93], "liddl": 4, "": [4, 6, 13, 25, 36, 51, 53, 55, 59, 62, 68, 69, 72, 73, 77, 78, 80, 83, 84, 88, 90, 93, 97, 100], 
"schew": 4, "kd": 4, "tjoa": 4, "zhou": [4, 101], "x": [4, 12, 13, 55, 59, 99, 100, 101], "ed": [4, 101, 107], "expert": [4, 103], "system": [4, 6, 34, 36, 42, 77, 96, 101, 103], "applic": [4, 59, 96, 101, 103], "dexa": 4, "lectur": [4, 84, 101], "note": [4, 64, 77, 78, 80, 84, 86, 88, 90, 93, 96, 101, 103, 105, 107, 109, 111, 113, 115], "comput": [4, 55, 77, 84, 101, 107], "scienc": [4, 62, 84, 101, 107], "vol": [4, 84, 101, 107], "7446": 4, "springer": [4, 6, 83, 84, 96, 101, 107], "berlin": [4, 101], "heidelberg": [4, 101], "http": [4, 6, 23, 25, 26, 28, 30, 32, 34, 55, 64, 68, 69, 72, 75, 77, 78, 80, 83, 84, 86, 88, 93, 97, 101, 103, 105, 107, 111], "doi": [4, 6, 23, 25, 26, 30, 32, 36, 42, 49, 51, 55, 59, 60, 62, 64, 68, 72, 73, 77, 84, 90, 93, 97, 101, 103, 105, 107, 109, 111, 113], "org": [4, 6, 23, 25, 26, 30, 55, 64, 68, 72, 77, 78, 80, 84, 93, 97, 101, 103, 105, 107, 111], "1007": [4, 6, 51, 53, 55, 59, 72, 77, 83, 84, 97, 101, 105, 107], "978": [4, 51, 53, 72, 77, 83, 84, 105, 107], "3": [4, 6, 9, 13, 23, 25, 28, 34, 36, 38, 42, 44, 49, 51, 53, 59, 62, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 93, 96, 97, 101, 103, 105, 107, 111, 115], "642": [4, 77, 83], "32600": 4, "4_18": 4, "neighbourhood": [4, 17, 32, 44, 46, 49, 58, 59, 60, 109], "4": [4, 6, 32, 38, 49, 62, 68, 69, 73, 75, 77, 78, 80, 86, 96, 97, 103, 107, 111], "oper": [4, 23, 25, 55, 59, 64, 88, 99, 109, 111, 113], "creatingcoverageitem": [5, 6], "genpattern": [5, 6], "generateallpattern": [5, 6], "tidtobitset": [5, 6], "minrf": 6, "minc": 6, "maxor": 6, "_coveragepattern": 6, "aim": [6, 53, 59, 60, 96], "coverag": 6, "bhargav": 6, "sripada": 6, "polep": 6, "krishna": [6, 49, 64, 68, 69, 73, 109], "reddi": [6, 49, 51, 64, 68, 73, 77, 90, 109], "banner": 6, "advertis": 6, "placement": 6, "www": [6, 34, 68, 69, 75, 86], "companion": 6, "volum": [6, 103], "2011": [6, 64, 103], "131": 6, "132": 6, "__http": 6, "dl": [6, 72], "acm": [6, 25, 55, 72, 93], "1145": [6, 25, 55, 64, 68], "1963192": 6, "1963259": 6, "control": [6, 62, 77, 78, 80, 83, 88], "everi": [6, 58, 60, 62, 66, 72, 77, 78, 80, 88], "must": [6, 58, 60, 66, 72, 77, 78, 80, 88, 90, 93], "appear": [6, 62, 77, 78, 80, 88, 90, 93], "least": [6, 12, 88], "maximum": [6, 10, 12, 19, 20, 42, 51, 53, 55, 59, 60, 62, 77, 78, 80, 83, 86, 90, 93, 96, 97, 107, 109], "ani": [6, 62, 77, 78, 80, 83, 100], "two": [6, 20, 51, 53, 55, 59, 60, 62, 77, 78, 80, 83, 90, 93], "reappear": [6, 62, 77, 78, 80, 83], "7": 6, "creat": [6, 7, 8, 9, 12, 15, 17, 20, 55, 59, 60, 62, 99, 100], "_databas": 6, "coveragetiddata": 6, "tid": [6, 12, 20, 32, 34, 36, 38, 40, 42, 62, 84], "prefix": [6, 26, 32, 34, 36, 38, 40, 42, 55, 59, 60, 62, 68, 77, 88, 93, 96], "tiddata": 6, "string": [6, 34, 40, 60, 62, 86], "coverageitem": 6, "item_set": 6, "convert": [6, 7, 8, 10, 11, 12, 13, 32, 34, 36, 38, 40, 42, 44, 46, 49, 73, 75, 77, 86, 93, 97, 103, 105, 107, 115], "bitset": [6, 25, 101], "gowtham": 6, "sriniva": 6, "trinath": 6, "v": [6, 99, 100, 103], "2015": [6, 34, 77], "journal": [6, 34, 77], "intellig": [6, 34, 59, 77, 96], "45": 6, "423": 6, "439": 6, "link": [6, 62, 83], "com": [6, 83], "articl": [6, 68], "s10844": 6, "014": 6, "0318": 6, "consid": [6, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 64, 77, 78, 80, 86, 88, 90, 93, 96, 97, 99, 101, 103, 105, 107, 109, 111, 113, 115], "percentag": [6, 10, 12, 19, 20, 25, 26, 28, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 77, 78, 80, 86, 88, 96, 97], "period": [6, 10, 19, 36, 38, 40, 42, 44, 49, 62, 66, 68, 
69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 96, 97, 107, 109, 115], "gettemporaldatabas": [7, 8], "gettransactionaldatabas": [7, 8], "getutilitydatabas": [7, 8], "convert2multipletimeseri": [7, 8], "convert2temporaldatabas": [7, 8], "convert2transactionaldatabas": [7, 8], "convert2uncertaintransact": [7, 8], "convert2utilitydatabas": [7, 8], "getfilenam": [7, 8, 12, 17, 20], "createtempor": [7, 8], "createtransact": [7, 8, 58, 60], "createutil": [7, 8], "denseformatdfplu": [7, 8], "sparseformatdfplu": [7, 8], "calculatemi": [7, 9], "getmisdatafram": [7, 9], "getdatafram": [7, 9, 15], "creatingitemset": [7, 10, 19, 23, 44, 46, 49, 64, 68, 72, 73, 75, 77, 80, 83, 84, 86, 88, 96, 97, 103, 105, 107, 109, 111, 113, 115], "getaveragetransactionlength": [7, 10, 19], "getaverageutil": [7, 10, 19], "getdatabases": [7, 10, 19], "getfrequenciesinrang": [7, 10, 19], "getmaximumtransactionlength": [7, 10, 19], "getmaximumutil": [7, 10, 19], "getminimumtransactionlength": [7, 10, 19], "getminimumutil": [7, 10, 19], "getnumberofitem": [7, 10, 19], "getsortedlistofitemfrequ": [7, 10, 19], "getsortedutilityvaluesofitem": [7, 10, 19], "getspars": [7, 10, 19], "getstandarddeviationtransactionlength": [7, 10, 19], "gettotalnumberofitem": [7, 10, 19], "gettotalutil": [7, 10, 19], "gettransanctionallengthdistribut": [7, 10, 19], "getvariancetransactionlength": [7, 10, 19], "plotgraph": [7, 10, 19], "printstat": [7, 10, 19, 54, 55], "readdatabas": [7, 10, 19], "convertdataintomatrix": [7, 10, 19], "getdens": [7, 10, 19], "getaverageitempersequencelength": [7, 10, 19], "getaverageitempersubsequencelength": [7, 10, 19], "getaveragesubsequencepersequencelength": [7, 10, 19], "getmaximumsequencelength": [7, 10, 19], "getmaximumsubsequencelength": [7, 10, 19], "getminimumsequencelength": [7, 10, 19], "getminimumsubsequencelength": [7, 10, 19], "getsequences": [7, 10, 19], "getsequenciallengthdistribut": [7, 10, 19], "getstandarddeviationsequencelength": [7, 10, 19], "getstandarddeviationsubsequencelength": [7, 10, 19], "getsubsequenciallengthdistribut": [7, 10, 19], "getvariancesequencelength": [7, 10, 19], "getvariancesubsequencelength": [7, 10, 19], "getaverageinterarrivalperiod": [7, 10, 19], "getaverageperiodofitem": [7, 10, 19], "getmaximuminterarrivalperiod": [7, 10, 19], "getmaximumperiodofitem": [7, 10, 19], "getminimuminterarrivalperiod": [7, 10, 19], "getminimumperiodofitem": [7, 10, 19], "getnumberoftransactionspertimestamp": [7, 10, 19], "getperiodsinrang": [7, 10, 19], "getstandarddeviationperiod": [7, 10, 19], "getaverageperiod": [7, 10, 19], "getmaximumperiod": [7, 10, 19], "getminimumperiod": [7, 10, 19], "startconvert": [7, 11], "spatiotemporaldatabasegener": [7, 12], "alreadyad": [7, 12], "coinflip": [7, 12], "createpoint": [7, 12], "outfilenam": [7, 12, 20], "saveasfil": [7, 12], "timestamp": [7, 12, 26, 62, 68, 77, 96, 107], "createtemporalfil": [7, 12, 20], "getdatabaseasdatafram": [7, 12, 20], "performcoinflip": [7, 12, 20], "tune": [7, 12, 20, 64, 103, 105, 107, 109, 111, 115], "generatearrai": [7, 12, 20], "gettransact": [7, 8, 12, 20, 58, 60], "plot": [7, 10, 13, 19, 21], "plotgraphsfromdatafram": [7, 13], "createdatabas": [7, 15], "saveastemporaldb": [7, 15], "saveastransactionaldb": [7, 15], "saveasuncertaintemporaldb": [7, 15], "saveasuncertaintransactionaldb": [7, 15], "saveasutilitytemporaldb": [7, 15], "saveasutilitytransactionaldb": [7, 15], "createneighborhoodfileusingeuclideandist": [7, 17], "createneighborhoodfileusinggeodesicdist": [7, 17], "plotedgedistribut": [7, 19], 
"plotnodedistribut": [7, 19], "printgraphdatabasestatist": [7, 19], "printindividualgraphstat": [7, 19], "creategeoreferentialtemporaldatabas": [7, 20], "creategeoreferentialtransactionaldatabas": [7, 20], "creategeoreferentialuncertaintransactionaldatabas": [7, 20], "createtemporaldatabas": [7, 20], "createtransactionaldatabas": [7, 20], "createuncertaintemporaldatabas": [7, 20], "createuncertaintransactionaldatabas": [7, 20], "createutilitydatabas": [7, 20], "totaltransact": [7, 20], "numofitem": [7, 20], "maxutilrang": [7, 20], "avgtransactionlength": [7, 12, 20], "__init__": [7, 20], "createsyntheticutilitydatabas": [7, 20], "createrandomnumb": [7, 20], "total_transact": [7, 20], "num_of_item": [7, 20], "avg_transaction_length": [7, 20], "create_temporal_databas": [7, 20], "generate_random_numb": [7, 20], "utilitydatagener": [7, 20], "generateandprintitempair": [7, 20], "generateexternalutilitydata": [7, 20], "getexternalutilitydata": [7, 20], "getinternalutilitydata": [7, 20], "getutilitydata": [7, 20], "saveitemsinternalutilityvalu": [7, 20], "saveitemsexternalutilityvalu": [7, 20], "latexgraphfil": 7, "fuz": 7, "idf": [7, 8, 13], "inputpattern": 7, "take": [7, 13, 20, 99], "point": [7, 13], "map": [7, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 90, 93, 99], "fuzfil": [7, 11, 34, 38], "plt": [7, 13], "top": [7, 30, 59, 60, 72, 83, 84, 97], "path": [7, 8, 10, 17, 19, 20, 44, 46, 49, 58, 60, 62, 64, 66, 68, 69, 72, 73, 75, 77, 78, 80, 83, 84, 86, 93, 96, 97, 99, 100, 103, 105, 107, 109, 111, 113, 115], "rank": [7, 30], "method": [7, 10, 11, 12, 13, 17, 19, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 75, 80, 83, 84, 99, 100, 105, 107], "storetopkpattern": 7, "get": [7, 8, 9, 10, 12, 19, 20, 44, 46, 49, 62, 93], "defin": [7, 8, 58, 60, 62, 66, 72], "patternid": 7, "predicted_class": 7, "minthreshold": 7, "8": [7, 23, 25, 36, 42, 77], "predict": 7, "dens": [7, 8], "un": 7, "inputdf": 8, "thresholdvalu": 8, "dftype": 8, "spars": 8, "db": [8, 9, 10, 11, 12, 15, 17, 19, 20], "16": [8, 9, 15, 93], "outputfilenam": [8, 9, 12, 20], "gettempor": 8, "tempor": [8, 11, 12, 20, 42, 59, 68, 69, 72, 75, 77, 78, 80, 83, 84, 96, 107], "getutil": [8, 58, 60], "util": [8, 10, 19, 20, 32, 51, 53, 55, 59, 60, 90], "rtype": [8, 9, 10, 12, 19, 25, 26, 28, 44, 53, 55, 60, 90, 93, 96, 107, 115], "judg": 8, "iddf": 8, "multipl": [8, 9, 10, 40, 64], "timeseri": [8, 40], "seri": [8, 10, 73, 77, 86], "write": [8, 99, 100], "break": 8, "union": 8, "thresholdconditiondf": 8, "denseformatdf_dump": 8, "beta": 9, "respect": [9, 25, 42, 62, 77, 88, 100, 107], "sd": 9, "read": [10, 19, 99, 100], "averag": [10, 12, 19, 20], "sum": [10, 12, 19, 20, 32, 34, 36, 38, 40, 42, 60], "divid": [10, 19], "standard": [10, 19, 100], "deviat": [10, 19], "sort": [10, 19, 34, 40, 51, 53, 55, 59, 60, 68, 73, 75, 77, 80, 86, 90, 97, 103, 105, 107, 115], "getsortedlistoftransactionlength": [10, 19], "each": [10, 19, 53, 55, 59, 60, 62, 93, 99], "fuzzydatabasestat": 10, "dataset": [10, 19, 23, 51, 53, 55, 58, 59, 60, 64, 68, 72, 73, 75, 77, 80, 83, 84, 86, 88, 90, 93, 97, 103, 105, 107, 109, 111, 113, 115], "max": [10, 19], "min": 10, "kei": [10, 13, 19, 46, 51, 55, 90, 99], "sparsiti": [10, 19], "varianc": [10, 19], "multipletimeseriesdatabasestat": 10, "fuzzi": [10, 11, 32, 34, 36, 38, 40, 42], "matrix": [10, 60], "densiti": 10, "disctribut": 10, "distribut": [10, 19], "ndarrai": [10, 19], "like": [10, 19, 93], "avarag": [10, 19], "minimun": [10, 19], "so": [10, 12, 19, 20, 62], 
"sequenti": [10, 19, 93, 100], "subsequ": [10, 19, 93], "self": [10, 19, 32, 51, 53, 55, 59, 60, 68, 90, 99], "per": [10, 12, 19, 20], "some": [10, 19, 62, 86], "statu": [10, 19], "about": [10, 19, 99], "_ap": [10, 19], "shota": [10, 19, 93], "suzuki": [10, 19, 93], "stamp": [10, 12, 19, 62], "inter": [10, 19], "arriv": [10, 19], "And": [10, 19, 93], "uncertaintemporaldatabasestat": 10, "_convert": [11, 93], "xmin": 12, "xmax": 12, "ymin": 12, "ymax": 12, "maxtimestamp": 12, "numberofitem": [12, 20], "itemchancelow": 12, "itemchancehigh": 12, "timestampchancelow": 12, "timestampchancehigh": 12, "spatiotempor": [12, 36, 46, 49, 53, 59, 60, 109], "give": [12, 93], "chanc": 12, "highest": 12, "lowest": 12, "100": [12, 13, 20], "9": 12, "accord": 12, "true": [12, 99, 100], "fals": [12, 99, 100, 103, 105, 107, 115], "numoftransact": [12, 20], "avglenoftransact": [12, 20], "numitem": [12, 20], "50": [12, 20], "typeoffil": [12, 20], "frame": [12, 58, 60, 66, 72], "depend": [12, 20, 51, 53, 55, 59, 60, 90], "cointoss": [12, 20], "correspond": [12, 20, 53, 59, 60, 99], "filenam": [12, 20, 73, 75, 77, 80, 86, 88, 97, 103, 105, 107, 115], "perform": [12, 20, 26, 28, 68, 99], "coin": [12, 20], "flip": [12, 20], "probabl": [12, 20, 59, 60, 107], "arraylength": [12, 20], "match": [12, 20], "15": 12, "6": [12, 34, 40, 69, 77, 78, 80, 97], "temporal_ot": 12, "percent": 12, "75": 12, "frameorbas": 12, "you": [12, 93, 100], "want": 12, "temporaldb": [12, 20], "bool": [12, 20, 60], "otherwis": [12, 23, 25, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 68, 69, 72, 73, 75, 77, 78, 80, 86, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "arrai": [12, 20, 53, 59, 60], "sumr": [12, 20], "equal": [12, 17, 20, 55], "target": [12, 20, 100], "numlin": [12, 20], "avgitemsperlin": [12, 20], "line": [12, 13, 20, 60, 62, 77, 88, 93], "num": [12, 20], "avg": [12, 20], "maxitem": [12, 20, 58, 60], "random": [12, 20], "n": [12, 20, 32, 68, 69, 93, 107], "whose": [12, 17, 20, 53], "dataframeintofigur": 13, "figur": 13, "column": 13, "xcolumn": 13, "axi": 13, "ycolumn": 13, "fig": 13, "end": [13, 58, 60, 62, 77], "titl": 13, "xlabel": 13, "ylabel": 13, "plotlinegraph": 13, "draw": 13, "idict": 13, "plotlinegraphfromdatafram": 13, "plotlinegraphfromdatfram": 13, "produc": [13, 101], "miner": [13, 34, 36, 38, 42, 51, 55, 59], "viz": 13, "markers": 13, "20": [13, 51, 53, 64, 90], "zoom": 13, "width": 13, "1500": 13, "height": 13, "1000": 13, "marker": 13, "level": 13, "screen": 13, "detected_object": 15, "appli": [15, 59, 96], "detected_obj": 15, "url": 16, "usernam": 16, "password": 16, "toaddress": 16, "subject": 16, "bodi": 16, "maxeuclediandist": 17, "euclid": 17, "distanc": [17, 53, 59], "pf": 17, "maxeuclideandist": 17, "pair": [17, 51, 55, 90, 98, 99, 100], "euclidean": 17, "less": [17, 96], "than": [17, 53, 55, 59, 60, 96], "maxeuclediandistac": 17, "maxdist": 17, "geodes": 17, "km": 17, "kilomet": 17, "maxdistac": 17, "transanct": 19, "requir": [20, 53, 55], "e": [20, 51, 53, 55, 59, 60, 90], "g": [20, 28, 40, 99, 100], "wai": 20, "text": [20, 99], "randomli": 20, "collect": 20, "ignor": 20, "metadata": 20, "avgtransact": 20, "synthet": 20, "geo": [20, 44, 46, 49, 51, 53, 105], "referenti": 20, "No": [20, 68], "noofitem": 20, "creategeoreferentialtransactiondatabas": 20, "uncertain": [20, 64, 101, 103, 105, 107, 115], "creategeoreferentialuncertaintransactiondatabas": 20, "createtemporalldatabas": 20, "transactions": 20, "signific": 20, "minutilityvalu": 20, "maxutilityvalu": 20, "minnumoftimesanitem": 20, 
"maxnumoftimesanitem": 20, "constructor": [20, 100], "initi": [20, 53, 59, 60], "targetsum": 20, "hemanth": 20, "sree": 20, "normal": 20, "multipli": 20, "output_fil": 20, "target_sum": 20, "databases": 20, "averagelengthoftransact": 20, "minimuminternalutilityvalu": 20, "maximuminternalutilityvalu": 20, "minimumexternalutilityvalu": 20, "maximumexternalutilityvalu": 20, "itemsup": [23, 96, 101], "minlength": [23, 101], "faulttoler": [23, 101], "_faulttolerantfrequentpattern": [23, 101], "ft": 23, "fault": [23, 101], "toler": [23, 101], "emploi": [23, 25, 26, 58, 60, 66, 72, 93, 101, 111, 113], "properti": [23, 25, 26, 77, 93, 101, 111, 113], "downward": [23, 25, 93, 101, 111, 113], "closur": [23, 25, 93, 101, 111, 113], "reduc": [23, 25, 34, 36, 38, 42, 64, 93, 101, 111, 113], "effect": [23, 25, 77, 93, 101, 111, 113], "pei": [23, 25, 34, 93, 115], "jian": 23, "tung": 23, "anthoni": 23, "jiawei": 23, "2001": [23, 93], "problem": [23, 34, 36, 38, 42], "challeng": [23, 34, 36, 38, 42], "falut": 23, "while": [23, 44, 46, 49, 55, 62, 68, 69, 72, 73, 75, 77, 78, 80, 86, 90, 93, 96, 97, 99, 101, 103, 105, 107, 109, 111, 113, 115], "csvfile": [23, 25, 26, 28], "compress": [23, 25, 111, 113], "decreas": [23, 25, 68, 73, 75, 77, 80, 86, 97, 103, 105, 107, 111, 113, 115], "yin": [23, 25], "et": [23, 25, 42, 55, 59, 60, 78, 97], "al": [23, 25, 42, 55, 59, 60, 78, 97], "without": [23, 25, 83, 93, 100, 101], "candid": [23, 25, 51, 53, 55, 59, 60, 90, 93, 100], "approach": [23, 25, 68, 77, 93, 101, 103], "knowledg": [23, 25, 55, 93, 113], "53": [23, 25, 59], "87": [23, 25], "2004": [23, 25, 64, 93], "1023": [23, 25, 93], "scan": [23, 25, 53, 59, 60, 64, 68, 72, 73, 75, 77, 80, 83, 84, 86, 88, 96, 97, 103, 105, 107, 109, 111, 113, 115], "frequentoneitem": [23, 44, 46, 49, 64, 72, 83, 84, 88, 103, 105, 109, 111, 113, 115], "printtopk": [24, 30], "_frequentpattern": [25, 26, 28, 30, 64, 88, 103, 105], "breadth": [25, 62, 93], "agraw": 25, "imi": 25, "nski": 25, "swami": 25, "larg": [25, 36, 46, 59, 60, 77, 78, 80, 84, 90], "sigmod": 25, "207": 25, "216": 25, "1993": 25, "170035": 25, "170072": 25, "moham": [25, 26, 93], "jave": 25, "zaki": [25, 26, 93], "scalabl": [25, 46], "ieee": [25, 30, 36, 42, 49, 59, 60, 77, 78, 80, 90, 93], "tran": [25, 55, 93], "knowl": [25, 55, 93], "eng": [25, 93], "12": 25, "372": 25, "390": 25, "2000": 25, "ieeexplor": [25, 30, 78, 80], "document": [25, 30, 78, 80], "846291": 25, "kundai": [25, 38], "diffset": 25, "kdd": [25, 55], "03": 25, "proceed": [25, 26, 55, 68, 93, 111], "ninth": 25, "sigkdd": [25, 55, 93], "intern": [25, 36, 42, 49, 55, 59, 60, 68, 73, 78, 83, 90, 93, 96, 109, 111, 113], "confer": [25, 36, 42, 49, 55, 59, 60, 68, 73, 78, 83, 90, 93, 109, 111, 113], "august": 25, "page": [25, 68, 103, 116], "326": 25, "335": 25, "956750": 25, "956788": 25, "yudai": 25, "masu": 25, "implement": [25, 68, 77, 96], "we": [25, 34, 36, 38, 42], "itemset": [25, 26, 28, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 68, 90, 109, 111, 113, 115], "check": [25, 26, 51, 53, 55, 59, 60, 77, 88, 90, 99], "exist": [26, 60], "superset": [26, 53], "ha": [26, 66, 77, 86], "same": [26, 32, 34, 36, 38, 40, 42, 93], "origin": [26, 60, 100, 103, 105, 107, 115], "ching": 26, "jui": 26, "hsiao": 26, "2002": [26, 93], "siam": [26, 111], "sdm": 26, "457": 26, "473": 26, "1137": [26, 111], "9781611972726": 26, "27": 26, "tidlist": [26, 46, 62, 68, 72, 77, 83, 84, 96], "hash": [26, 77], "equival": [26, 44, 46, 68, 77, 96], "maxfp": 28, "grahn": 28, "zhu": 28, "high": [28, 32, 34, 36, 42, 
51, 53, 55, 59, 60, 90], "enc": 28, "concordia": 28, "ca": 28, "paper": [28, 86], "hpdm03": 28, "pdf": [28, 32, 68, 69, 75, 86, 88, 111], "maxper": [28, 32, 34, 36, 38, 40, 42, 44, 46, 55, 59, 62, 66, 73, 75, 77, 78, 80, 83, 86, 88, 96, 97, 107, 109, 111], "functon": 28, "zhi": 30, "hong": [30, 34, 115], "deng": 30, "guo": 30, "dong": 30, "fang": 30, "1109": [30, 36, 42, 49, 59, 60, 73, 77, 90, 109, 113], "icmlc": 30, "2007": [30, 32], "4370261": 30, "xplore": 30, "analysi": 30, "topkfrequentpattern": 30, "element": [31, 32, 34, 36, 38, 40, 42, 46, 55, 59, 60, 99], "iutil": 32, "rutil": 32, "keep": [32, 34, 36, 38, 40, 42, 51, 53, 55, 59, 60, 90], "tact": 32, "id": [32, 99, 100], "rest": 32, "_corelatedfuzzyfrequentpattern": 32, "algorithm": [32, 34, 40, 44, 46, 49, 51, 55, 58, 59, 60, 62, 64, 66, 69, 72, 75, 80, 83, 84, 99, 101, 105], "lin": [32, 34, 55, 96, 103, 115], "chueh": 32, "h": [32, 40, 93], "citeseerx": 32, "ist": 32, "psu": 32, "edu": 32, "viewdoc": 32, "download": 32, "416": 32, "6053": 32, "rep": 32, "rep1": 32, "thi": [32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 58, 59, 60, 62, 64, 66, 68, 69, 72, 75, 80, 83, 84, 99, 100, 101, 105], "spatial": [32, 34, 36, 38, 40, 42, 46, 49, 53, 55, 59, 60, 109], "starttimetim": 32, "itemscnt": [32, 34, 36, 38, 40, 42], "mapitemslowsum": [32, 34, 36, 42], "track": [32, 34, 36, 38, 40, 42, 55, 59], "low": [32, 34, 36, 42, 60], "region": [32, 34, 36, 38, 40, 42], "mapitemsmidsum": [32, 34, 36, 40, 42], "middl": [32, 34, 36, 42], "mapitemshighsum": [32, 34, 36, 42], "mapitemsum": [32, 34, 36, 38, 40, 42], "mapitemregion": [32, 34, 36, 38, 40, 42], "jointcnt": [32, 34, 42], "ffi": [32, 34, 36, 38, 40, 42], "construct": [32, 34, 36, 38, 40, 42, 55, 59, 68, 77, 99, 100, 103, 105, 107, 115], "buffers": [32, 34, 36, 38, 40, 42], "buffer": [32, 34, 36, 38, 40, 42], "itembuff": [32, 34, 42], "starttimemin": 32, "getratio": 32, "prefixlen": [32, 34, 36, 38, 40, 42, 55, 59], "ration": 32, "fsfimin": [32, 34, 36, 38, 42], "fsfim": [32, 34, 36, 38, 40, 42], "px": [32, 34, 36, 38, 40, 42], "findelementwithtid": [32, 34, 36, 38, 40, 42], "ulist": [32, 34, 36, 38, 40, 42, 55, 59], "writeout": [32, 34, 36, 38, 40, 42], "sumiutil": [32, 34, 36, 38, 40, 42], "patten": [32, 34, 36, 38, 40, 42, 62], "correlatedfuzzyfrequentpattern": 32, "_fuzzyfrequentpattenr": 34, "desir": [34, 36, 38, 42], "trivial": [34, 36, 38, 42], "huge": [34, 36, 38, 42], "prune": [34, 36, 38, 42], "chun": [34, 103, 115], "wei": [34, 55, 103, 115], "li": 34, "ting": 34, "fournier": [34, 46, 51, 55, 62, 69, 73, 90, 96, 97, 107, 115], "viger": [34, 46, 51, 55, 62, 73, 90, 96, 97, 107, 115], "philipp": [34, 46, 69, 83, 107, 115], "tzung": [34, 103, 115], "fast": [34, 55], "29": 34, "2373": 34, "2379": 34, "3233": 34, "IFS": 34, "151936": 34, "researchg": 34, "net": 34, "public": 34, "286510908_a_fast_algorithm_for_mining_fuzzy_frequent_itemset": 34, "fmfile": 34, "membership": 34, "joinscnt": [34, 36, 38, 40, 42], "compareitem": [34, 40], "o1": [34, 40], "o2": [34, 40], "ascend": [34, 40], "ffiminermin": 34, "fuzzymembership": 34, "_fuzzyspatialfrequentpattern": [36, 38], "veena": [36, 59, 105, 107], "chithra": 36, "u": [36, 42, 51, 60, 62, 68, 69, 75, 77, 84, 86, 90, 93, 96, 111], "agarw": 36, "zettsu": [36, 46, 49, 59, 73, 77, 80, 105, 107, 109], "quantit": [36, 40, 42, 59, 60], "2021": [36, 59, 60, 77, 90, 107], "fuzz": [36, 42], "fuzz45933": 36, "9494594": 36, "neighbor": [36, 38, 55, 59, 99], "intersect": [36, 38, 53, 58, 59, 60], "neighbourx": [36, 38], "neighbouri": [36, 38], 
"common": [36, 38, 44, 46, 49, 53, 59, 60], "samplen": [36, 38, 44, 46, 49, 53, 55, 59, 60], "fuzzyspatialfrequentpattern": 36, "generategraph": [37, 38], "kwangwari": 38, "_fuzzypartialperiodicpattern": 40, "partial": [40, 49, 66, 68, 69, 72], "irregulat": 40, "mapitemsgsum": 40, "mapitemshsum": 40, "f3pmine": 40, "palla": [40, 72, 105, 107], "_fuzzyperiodicfrequentpattern": 42, "2020": [42, 46, 62, 78, 80], "glasgow": 42, "uk": 42, "fuzz48607": 42, "9177579": 42, "maxtid": 42, "lasttid": 42, "last": [42, 60, 62, 99], "itemstoregion": 42, "il": 42, "mapneighbour": [43, 44, 46, 48, 49], "_georeferencedperiodicfrequentpattern": 44, "extens": [44, 46, 99], "\u00e9clat": 44, "stand": [44, 46], "cluster": [44, 46], "bottom": [44, 46], "up": [44, 46, 55], "lattic": [44, 46], "travers": [44, 46, 99], "referenc": [44, 46, 49, 51, 53, 105], "avail": [44, 46, 49, 93], "ifilenam": [44, 46, 49], "getneighbouritem": [44, 46, 49], "keyset": [44, 46, 49], "neighbourfil": [44, 46, 49], "georeferencedperidicfrequentpattern": 44, "ravikumar": [44, 59, 68, 77, 84], "_spatialfrequentpattern": 46, "popular": 46, "more": [46, 93], "version": [46, 77], "koji": [46, 59, 77, 80, 105, 107], "toyoda": [46, 49, 51, 80, 86, 90, 107, 109], "masashi": [46, 68, 69, 80, 86, 107], "masaru": [46, 68, 69, 80, 86, 88], "veri": [46, 59, 60, 77, 78, 80, 84, 90], "dictkeystoint": 46, "ilist": 46, "eclatgener": [46, 72, 83, 84], "clist": 46, "generatespatialfrequentpattern": 46, "spatialfrequentpattern": 46, "minp": [49, 68, 72, 86, 88], "maxiat": 49, "_partialperiodicspatialpattern": 49, "georeferenec": 49, "c": [49, 68, 73, 86, 93, 96, 99, 100, 101, 109], "saideep": [49, 73, 86], "2019": [49, 51, 73, 96, 109], "big": [49, 59, 60, 72, 78, 84, 90], "233": 49, "238": 49, "bigdata47090": 49, "9005693": 49, "partialperiodicspatialpattern": 49, "_utilitypattern": [51, 53, 55, 59, 90], "help": [51, 90], "hufi": 51, "cutoff": 51, "suffix": 51, "pakdd": [51, 69, 77, 105], "030": [51, 53, 107], "16145": 51, "3_15": 51, "candidatecount": [51, 53, 55, 58, 59, 60, 90], "maxmemori": [51, 53, 55, 58, 59, 60, 90], "highutilityfrequentitemset": 51, "utilitybinarraylu": [51, 53, 55, 58, 59, 60, 90], "hold": [51, 53, 55, 59, 60, 90], "local": [51, 53, 55, 59, 60, 62, 90], "utilitybinarraysu": [51, 53, 55, 58, 59, 60, 90], "subtre": [51, 53, 55, 59, 60, 90], "oldnamestonewnam": [51, 53, 55, 58, 59, 60, 90], "old": [51, 53, 55, 59, 60, 90], "new": [51, 53, 55, 59, 60, 64, 68, 69, 75, 86, 90, 99, 103], "newnamestooldnam": [51, 53, 55, 58, 59, 60, 90], "singleitemsetssupport": 51, "singleitemsetsutil": 51, "patterncount": [51, 53, 55, 59, 90], "rhui": [51, 90], "itemstokeep": [51, 53, 55, 59, 60, 90], "promis": [51, 53, 55, 59, 60, 90], "extend": [51, 53, 55, 59, 60, 90, 99], "other": [51, 62, 86, 90, 93, 96], "itemstoexplor": [51, 53, 55, 59, 60, 90], "need": [51, 90, 99], "explor": [51, 90, 93, 99], "backtrackinghufim": 51, "transactionsofp": [51, 53, 55, 59, 60, 90], "prefixlength": [51, 53, 55, 59, 60, 88, 90], "recurs": [51, 53, 55, 59, 60, 68, 77, 90, 93, 96, 99], "useutilitybinarraystocalculateupperbound": [51, 53, 55, 58, 59, 60, 90], "transactionsp": [51, 53, 55, 59, 60, 90], "sub": [51, 53, 55, 59, 60, 90], "tempposit": [51, 53, 55, 59, 60, 90], "rel": [51, 88, 90], "what": [51, 53, 55, 59, 60, 90, 93], "chose": [51, 53, 55, 59, 60, 90], "isequ": [51, 53], "transaction1": [51, 53, 55, 59, 60, 90], "transaction2": [51, 53, 55, 59, 60, 90], "ident": [51, 53, 55, 59, 60, 90], "useutilitybinarraytocalculatesubtreeutilityfirsttim": [51, 53, 55, 
58, 59, 60, 90], "sortdatabas": [51, 53, 55, 58, 59, 60, 89, 90], "sorttransact": [51, 53], "trans1": [51, 53, 55, 59, 60, 90], "trans2": [51, 53, 55, 59, 60, 90], "useutilitybinarraytocalculatelocalutilityfirsttim": [51, 53, 55, 58, 59, 60, 90], "35": [51, 53, 55, 59, 60, 90], "pradeep": [51, 53, 55, 59, 60, 90], "pallikila": [51, 53, 55, 59, 60, 90], "37188": 53, "3_17": 53, "highutilityfrequentspatialitemset": 53, "pmu": [53, 59, 60], "shufi": 53, "ie": [53, 55, 59, 60], "subtreeutil": [53, 55, 59, 60], "grater": [53, 59, 60], "calculateneighbourintersect": [53, 58, 59, 60], "backtrackingefim": [53, 55, 58, 59, 60], "shui": [53, 59], "neighbourhoodlist": [53, 59, 60], "av": [53, 59, 60], "lst1": [53, 59, 60], "lst2": [53, 59, 60], "bin": [53, 59, 60], "sort_transact": [54, 55, 58, 59, 60, 89, 90], "fastest": 55, "zida": 55, "cw": 55, "inf": [55, 99, 100], "syst": [55, 64], "51": 55, "595": 55, "625": 55, "2017": [55, 68, 113], "s10115": 55, "016": 55, "0986": 55, "highutilityitemset": [55, 59], "hui": [55, 59], "greater": [55, 96], "is_equ": [55, 58, 60, 90], "_pd": 55, "_transact": [55, 90], "ifile1": 55, "hmier": 55, "mapfmap": [55, 59], "euc": [55, 59], "fhm": [55, 59], "genet": 55, "huicnt": [55, 59], "nighbou": 55, "explore_searchtre": [55, 59], "updateclos": [55, 59], "cul": [55, 59], "st": [55, 59], "excul": [55, 59], "newt": [55, 59], "ex": [55, 59], "ey_t": 55, "updat": [55, 59, 68, 73, 75, 77, 80, 86, 93, 97, 99, 103, 105, 107, 115], "saveitemset": [55, 59, 88], "updateel": [55, 59], "z": [55, 59], "duppo": 55, "vale": [55, 59], "duplic": [55, 59], "construccul": 55, "exnighbor": 55, "phase": 55, "vincent": 55, "tseng": 55, "cheng": 55, "wu": 55, "bai": 55, "en": 55, "shie": 55, "philip": 55, "yu": 55, "2010": 55, "16th": 55, "machineri": 55, "york": 55, "ny": 55, "usa": [55, 59, 60, 90], "253": 55, "262": [55, 90], "1835804": 55, "1835839": 55, "numberofnod": 55, "node": [55, 61, 62, 68, 73, 75, 76, 77, 80, 86, 97, 103, 105, 107, 115], "build": 55, "parentnumberofnod": 55, "parent": [55, 62, 77, 107], "mapitemtominimumutil": 55, "phui": 55, "mapitemtotwu": 55, "twu": [55, 59, 60], "createlocaltre": 55, "alpha": 55, "getmaxitem": [58, 60], "additemset": [58, 60], "heaplist": [58, 60], "inttostr": [58, 60], "strtoint": [58, 60], "temp": [58, 60], "getitem": [58, 60], "getlastposit": [58, 60], "getpmu": [58, 60], "insertionsort": [58, 60], "offset": [58, 60], "prefixutil": [58, 60], "projecttransact": [58, 60], "removeunpromisingitem": [58, 60], "abc": [58, 60, 66, 72], "actual": [58, 60], "interest": [58, 59, 60, 96], "model": 59, "mani": 59, "real": 59, "world": 59, "involv": 59, "spatio": 59, "orlando": [59, 60, 90], "fl": [59, 60, 90], "4925": [59, 60], "4935": [59, 60], "bigdata52589": [59, 60, 90], "9671912": [59, 60], "mapofpmu": 59, "constructcul": 59, "compactulist": 59, "exneighbour": 59, "eyt": 59, "duprevpo": 59, "pamalla": [59, 105], "penugonda": 59, "raj": 59, "bathala": 59, "dao": [59, 107], "minh": 59, "bommisetti": 59, "2023": [59, 86, 105, 107], "hdshui": 59, "novel": [59, 64], "dimension": 59, "26": 59, "s10489": [59, 97], "022": 59, "04436": 59, "when": [59, 60, 62, 93], "_isequ": 59, "datasetpath": 60, "largest": 60, "tkshui": 60, "add": [60, 62, 93, 98, 99, 100], "prioriti": 60, "queue": [60, 100], "ad": [60, 62, 73, 75, 77, 80, 86, 93, 97, 103, 105, 107, 115], "numpi": 60, "current": 60, "secondari": 60, "project": 60, "primari": 60, "item1": 60, "item2": 60, "whether": [60, 99, 100], "both": [60, 93], "posit": [60, 88, 107], "transactionutil": 60, 
"utilit": 60, "pointer": 60, "projectedtransact": 60, "till": 60, "remov": [60, 68, 73, 75, 77, 80, 86, 97, 99, 103, 105, 107, 115], "over": [60, 99], "nams": 60, "getchild": [61, 62], "addtransact": [61, 62], "createprefixtre": [61, 62], "deletenod": [61, 62], "fixnodelink": [61, 62], "maxsop": 62, "mindur": 62, "_localperiodicpattern": 62, "event": 62, "behavior": 62, "non": [62, 86, 103, 105, 115], "predefin": 62, "said": 62, "regularli": 62, "continu": 62, "spillov": 62, "allow": 62, "where": [62, 68, 77, 90, 93, 99, 100, 103, 105, 107, 109, 115], "minim": 62, "durat": 62, "ensur": 62, "those": 62, "yang": [62, 96, 97], "ventura": 62, "luna": [62, 90], "discret": 62, "elsevi": 62, "ppt": 62, "1016": [62, 64, 77, 103], "ins": 62, "09": [62, 64, 103], "044": 62, "consecut": 62, "tsmin": 62, "date": 62, "tsmax": 62, "ptl": 62, "tslist": 62, "bit": [62, 93, 101], "vector": [62, 101], "root": [62, 68, 73, 75, 77, 80, 86, 97, 103, 105, 107, 115], "whole": 62, "findsepar": 62, "split": 62, "cretelpplist": 62, "createtslist": 62, "generatelpp": 62, "createlpptre": 62, "lpptree": 62, "patterngrowth": 62, "prefixpflist": 62, "calculateptl": 62, "calculateptlbit": 62, "getlocalperiodicpattern": 62, "lppmgrowth": 62, "minsop": 62, "f": 62, "nakamura": 62, "localperiodicpatterntre": 62, "child": 62, "children": [62, 77], "nodelink": 62, "next": [62, 93], "itemnam": [62, 77], "don": 62, "frequentpatterngrowth": 62, "structur": [62, 99], "firstnodelink": 62, "branch": [62, 77], "frequentpatterntre": [62, 77, 88], "newnod": 62, "delet": [62, 93], "timestamplist": 62, "fix": 62, "lppmbreathsearch": 62, "extensionofp": 62, "lppbreadth": 62, "lppmdepthsearch": 62, "mi": 64, "ya": 64, "hu": 64, "yen": 64, "liang": 64, "chen": [64, 93], "2006": 64, "mechan": 64, "deci": 64, "42": [64, 93], "octob": 64, "24": 64, "dss": 64, "007": 64, "were": [64, 103, 105, 107, 109, 111, 115], "appropri": [64, 103, 105, 107, 109, 111, 115], "limit": [64, 103, 105, 107, 109, 111, 115], "misfil": 64, "11": [64, 93], "edbt": 64, "1951365": 64, "1951370": 64, "minpr": 66, "userspecifi": 66, "term": 66, "getfrequentpattern": [66, 88], "execut": [66, 83], "_partialperiodicpattern": [68, 69, 73], "3pgrowth": 68, "ssdbm": 68, "17": 68, "29th": 68, "scientif": 68, "statist": 68, "managementjun": 68, "30": 68, "6http": 68, "3085504": 68, "3085535": 68, "partialperiodiconeitem": 68, "updatetransact": [68, 103, 105, 107, 115], "aperiod": [68, 73, 75, 77, 80, 97, 107], "buildtre": [68, 73, 75, 77, 80, 86, 97, 103, 105, 107, 115], "constrcut": 68, "null": [68, 73, 75, 77, 80, 86, 90, 97, 103, 105, 107, 115], "getpatternindf": 68, "descripit": 68, "3peclat": 68, "kirana": 68, "venkateshd": 68, "toyodaa": 68, "kitsuregawaa": 68, "tkl": [68, 69, 75, 86], "ii": [68, 69, 75, 86], "tokyo": [68, 69, 75, 86], "ac": [68, 69, 75, 86, 93], "jp": [68, 69, 75, 86], "upload": [68, 69, 75, 86], "publication_fil": [68, 69, 75, 86], "774": 68, "jss_2017": 68, "creatingoneitemset": [68, 77], "getperiodandsupport": [68, 76, 77], "block": [68, 72, 83, 96], "periodicsupport": [69, 73], "kiran1": 69, "venkatesh2": 69, "viger3": 69, "toyoda1": 69, "reddy2": 69, "799": 69, "031": [72, 84, 105], "39847": 72, "6_28": 72, "candidatelist": [72, 83, 84, 93], "generatefrequentpattern": [72, 83, 84], "topk_pppgrowth": 72, "irregular": 73, "workshop": [73, 109], "icdmw": [73, 109], "1020": 73, "1028": 73, "00147": 73, "periodicfrequentoneitem": [73, 75, 77, 80, 97, 107], "updatedatabas": [73, 75, 77, 80, 86, 97, 107], "remain": [73, 75, 77, 86, 97, 103, 105, 107, 
115], "maxperallconf": 75, "_periodiccorrelatedpattern": 75, "897": 75, "venkatesh2018_chapter_discoveringperiod": 75, "maaxperallconf": 75, "thr": 75, "addchild": [76, 77], "conditionaltransact": [76, 77], "topkpfp": [76, 82], "kpfpminer": [76, 82], "_periodicfrequentpattern": [77, 78, 80, 83, 84, 107], "watanob": [77, 80, 84], "toward": [77, 88], "columnar": 77, "iea": [77, 96], "aie": [77, 96], "sy": 77, "khairuzzaman": 77, "tanbeer": 77, "chowdhuri": 77, "farhan": 77, "byeong": 77, "soo": 77, "jeong": 77, "young": 77, "koo": 77, "2009": 77, "01307": 77, "2_24": 77, "improv": 77, "greedi": 77, "udaykiran": 77, "masarukitsuregawa": 77, "krishnareddyd": 77, "softwar": 77, "februari": 77, "2016": 77, "jss": 77, "035": 77, "delimit": [77, 88], "pfpgorwthplu": 77, "append": [77, 99], "detail": 77, "anirudh": 77, "kitsuregawai": 77, "symposium": 77, "ssci": 77, "7849926": 77, "getconditionalpatternsindatafram": 77, "onelengthitem": 77, "_interv": 77, "fail": [77, 101], "9378215": 78, "maxpf": 80, "yutaka": 80, "bhaskar": 80, "chaudhuri": 80, "9260063": 80, "maxpfrowth": 80, "topkpfpgrowth": [82, 83], "getper_sup": [82, 84], "komat": 83, "amphawan": [83, 113], "lenca": 83, "athasit": 83, "surarerk": 83, "advanc": [83, 101], "technologi": [83, 113], "chapter": 83, "10392": 83, "6_3": 83, "sampl": 83, "credit": 83, "2022": [84, 97, 107], "analyt": 84, "bda": 84, "13773": 84, "cham": [84, 107], "24094": 84, "2_14": 84, "minrec": 86, "_recurringpattern": 86, "recur": 86, "haichuan": 86, "shang": 86, "693": 86, "could": 86, "potenti": 86, "relat": [86, 99], "numer": 86, "recommend": 86, "oneitem": 86, "possibl": 86, "minr": 88, "comad": 88, "comad2012": 88, "saveallcombin": 88, "tempbuff": 88, "frequentpatterngrowthgener": 88, "port": 88, "particular": 88, "__mapsupport": 88, "__minratio": 88, "23": 88, "minur": 90, "invari": 90, "252": 90, "9672064": 90, "relativehighutilityitemset": 90, "backtrackingrhuim": 90, "make1lendatabas": [92, 93], "make2lendatabas": [92, 93], "make3lendatabas": [92, 93], "makenextrow": [92, 93], "makenextrowsam": [92, 93], "makenextrowsame2": [92, 93], "makenextrowsame3": [92, 93], "makexlendatabas": [92, 93], "makexlendatabasesam": [92, 93], "dfsprune": [92, 93], "sstep": [92, 93], "countsup": [92, 93], "make2bitdatabas": [92, 93], "getsameseq": [92, 93], "makenext": [92, 93], "makenextsam": [92, 93], "makeseqdatabasefirst": [92, 93], "makeseqdatabasesam": [92, 93], "makesupdatabas": [92, 93], "serchsam": [92, 93], "_sequentialpattern": 93, "serch": 93, "abov": 93, "mach": 93, "learn": [93, 115], "januari": 93, "31": 93, "60": 93, "1007652502315": 93, "dx": 93, "_xlendatabas": 93, "differ": 93, "rownumb": 93, "_xlendatabasesam": 93, "candidatetofrequ": 93, "frequenttocandid": 93, "frequentlist": 93, "make": 93, "join": 93, "xlen": 93, "latestword": 93, "latestword2": 93, "row": 93, "latest": 93, "word": 93, "previou": 93, "latestword1": 93, "rowlen": 93, "seq": 93, "xlendatabas": 93, "prnt": 93, "ayr": 93, "gehrk": 93, "yiu": 93, "flannick": 93, "bitmap": 93, "eighth": 93, "edmonton": 93, "alberta": 93, "canada": 93, "juli": 93, "_iddatabas": 93, "_maxseqlen": 93, "_creatingitemset": 93, "istep": 93, "again": 93, "them": [93, 99], "until": 93, "ssteo": 93, "ones": 93, "010101": 93, "001111": 93, "00001001": 93, "00000111": 93, "pattren": 93, "got": 93, "befor": 93, "presum": 93, "relationship": 93, "later": 93, "ab": 93, "span": 93, "mortazavi": 93, "asl": 93, "wang": [93, 97], "pinto": 93, "q": 93, "dayal": 93, "hsu": 93, "1424": 93, "1440": 93, "startrow": 93, 
"sepdatabas": 93, "head": 93, "chang": 93, "maxla": [96, 97], "_stableperiodicfrequentpattern": [96, 97], "stabl": [96, 97], "dicov": 96, "three": 96, "contraint": 96, "labil": 96, "proc": 96, "32nd": 96, "conf": 96, "industri": 96, "engin": 96, "lnai": 96, "230": 96, "244": 96, "loss": 96, "calculatela": 96, "pfpeclat": 96, "appl": 97, "intel": 97, "52": 97, "6917": 97, "6938": 97, "020": 97, "02181": 97, "includ": [97, 99], "containedg": [98, 99, 100], "copi": [98, 99, 100], "getallvlabel": [98, 99, 100], "getat": [98, 99, 100], "geteelist": [98, 99, 100], "getrightmost": [98, 99, 100], "getrightmostpath": [98, 99, 100], "isempti": [98, 99, 100], "notpreofrm": [98, 99, 100], "onrightmostpath": [98, 99, 100], "getedgelabel": [98, 99, 100], "getv1": [98, 99, 100], "getv2": [98, 99, 100], "getvlabel1": [98, 99, 100], "getvlabel2": [98, 99, 100], "pairsmallerthan": [98, 99, 100], "smallerthan": [98, 99, 100], "smallerthanorigin": [98, 99, 100], "emptyintegerarrai": [98, 99], "emptyvertexlist": [98, 99], "findallwithlabel": [98, 99, 100], "getallneighbor": [98, 99, 100], "getallvertic": [98, 99, 100], "getedg": [98, 99, 100], "getedgecount": [98, 99, 100], "getid": [98, 99, 100], "getnonprecalculatedallvertic": [98, 99, 100], "getvlabel": [98, 99, 100], "isneighbor": [98, 99, 100], "precalculatelabelstovertic": [98, 99, 100], "precalculatevertexlist": [98, 99, 100], "precalculatevertexneighbor": [98, 99, 100], "removeinfrequentlabel": [98, 99, 100], "edge_count_prun": [98, 99, 100], "eliminate_infrequent_edge_label": [98, 99, 100], "eliminate_infrequent_vertex_pair": [98, 99, 100], "eliminate_infrequent_vertic": [98, 99, 100], "findallonlyonevertex": [98, 99, 100], "getfrequentsubgraph": [98, 99], "gspandf": [98, 99, 100], "iscanon": [98, 99, 100], "readgraph": [98, 99, 100], "removeinfrequentvertexpair": [98, 99, 100], "rightmostpathextens": [98, 99, 100], "rightmostpathextensionsfromsingl": [98, 99, 100], "subgraphisomorph": [98, 99, 100], "getsupportforitem": [98, 99, 100], "incrementcount": [98, 99, 100], "removeinfrequententriesfrommatrix": [98, 99, 100], "setsupport": [98, 99, 100], "addedg": [98, 99, 100], "getedgelist": [98, 99, 100], "getlabel": [98, 99, 100], "removeedg": [98, 99, 100], "empty_integer_arrai": [98, 100], "empty_vertex_list": [98, 100], "dynamic_search": [98, 100], "threaded_dynamic_search": [98, 100], "getksubgraph": [98, 100], "getminsupport": [98, 100], "getqueues": [98, 100], "getsubgraph": [98, 100], "gspandynamicdf": [98, 100], "registerascandid": [98, 100], "startthread": [98, 100], "ee": [99, 100], "rightmost": 99, "certain": 99, "v1": [99, 100], "v2": [99, 100], "label": [99, 100], "index": [99, 100, 116], "rightmostpath": 99, "edgelabel": [99, 100], "vlabel1": [99, 100], "vlabel2": [99, 100], "x1": [99, 100], "x2": [99, 100], "y1": [99, 100], "y2": [99, 100], "setofgraphsid": [99, 100], "vmap": [99, 100], "targetlabel": [99, 100], "precalcul": 99, "vertic": [99, 100], "iter": 99, "through": 99, "cach": 99, "specif": [99, 100], "accordingli": 99, "minsupport": 99, "outputsinglevertic": [99, 100], "maxnumberofedg": [99, 100], "outputgraphid": [99, 100], "_gspan": 99, "graphdb": [99, 100], "outputfrequentvertic": [99, 100], "option": 99, "infrequ": 99, "boolean": [99, 100], "flag": 99, "determin": 99, "subgraphid": [99, 100], "subgraph": [99, 100], "provid": 99, "snippet": 99, "instanc": 99, "_ab": [99, 100], "itself": 99, "doe": 99, "modifi": 99, "found": 99, "dure": 99, "canon": 99, "compar": 99, "pars": 99, "graphid": [99, 100], "seem": 99, "respons": 99, 
"identifi": 99, "These": 99, "focus": 99, "design": 99, "outputpath": 99, "isomorph": 99, "try": 99, "indic": [99, 100], "valid": 99, "vlabel": [99, 100], "edgetoremov": [99, 100], "tkginstanc": 100, "thread": 100, "activ": 100, "mai": 100, "subclass": 100, "invok": 100, "callabl": 100, "pass": 100, "argument": 100, "keyword": 100, "arg": 100, "kwarg": 100, "_tkg": 100, "empti": 100, "represent": 101, "koh": 101, "jl": 101, "yo": 101, "pw": 101, "2005": [101, 111], "l": 101, "ooi": 101, "meng": 101, "dasfaa": 101, "3453": 101, "11408079_51": 101, "frequentfrequ": 101, "abil": 101, "handl": 101, "error": 101, "inconsist": 101, "incorrect": 101, "cufp": 103, "peihong": 103, "39": 103, "issu": 103, "march": 103, "4084": 103, "4093": 103, "eswa": 103, "087": 103, "gfp": 105, "33380": 105, "4_3": 105, "neighborfil": 105, "sampleneighbor": 105, "printtre": [106, 107], "zhang": 107, "mantoro": 107, "ayu": 107, "wong": 107, "hidayanto": 107, "neural": 107, "iconip": 107, "commun": 107, "1516": 107, "92307": 107, "5_83": 107, "_lno": 107, "removefalseposit": 107, "plu": 107, "upfp": 107, "1792": 107, "singapor": 107, "981": 107, "99": 107, "1642": 107, "9_16": 107, "floot": [107, 109], "minw": 109, "_weightedfrequentspatialpattern": 109, "weight": [109, 111, 113, 115], "987": 109, "996": 109, "00143": 109, "minweight": [109, 111], "weightfil": [109, 111], "weightfrequentneighbourhoodpattern": 109, "wfile": [109, 111, 113, 115], "_weightedfrequentpattern": [111, 115], "wfminer": 111, "yun": 111, "leggett": 111, "636": 111, "640": 111, "epub": 111, "9781611972757": 111, "76": 111, "weightsampl": 111, "weightfrequentpattern": 111, "_wfile": 113, "regular": 113, "_weightedfrequentregularpattern": 113, "wfri": 113, "klangwisan": 113, "9th": 113, "smart": 113, "kst": 113, "66": 113, "71": 113, "7886090": 113, "weightsupport": 113, "expsup": 115, "expwsup": 115, "puf": 115, "book": 115, "machin": 115, "recognit": 115, "jerri": 115, "wensheng": 115, "gan": 115, "packag": [116, 117], "modul": [116, 117], "subpackag": 117, "content": 117}, "objects": {"": [[0, 0, 0, "-", "PAMI"]], "PAMI": [[1, 0, 0, "-", "AssociationRules"], [3, 0, 0, "-", "correlatedPattern"], [5, 0, 0, "-", "coveragePattern"], [7, 0, 0, "-", "extras"], [22, 0, 0, "-", "faultTolerantFrequentPattern"], [24, 0, 0, "-", "frequentPattern"], [31, 0, 0, "-", "fuzzyCorrelatedPattern"], [33, 0, 0, "-", "fuzzyFrequentPattern"], [35, 0, 0, "-", "fuzzyGeoreferencedFrequentPattern"], [37, 0, 0, "-", "fuzzyGeoreferencedPeriodicFrequentPattern"], [39, 0, 0, "-", "fuzzyPartialPeriodicPatterns"], [41, 0, 0, "-", "fuzzyPeriodicFrequentPattern"], [43, 0, 0, "-", "geoReferencedPeriodicFrequentPattern"], [45, 0, 0, "-", "georeferencedFrequentPattern"], [47, 0, 0, "-", "georeferencedFrequentSequencePattern"], [48, 0, 0, "-", "georeferencedPartialPeriodicPattern"], [50, 0, 0, "-", "highUtilityFrequentPattern"], [52, 0, 0, "-", "highUtilityGeoreferencedFrequentPattern"], [54, 0, 0, "-", "highUtilityPattern"], [57, 0, 0, "-", "highUtilityPatternsInStreams"], [58, 0, 0, "-", "highUtilitySpatialPattern"], [61, 0, 0, "-", "localPeriodicPattern"], [63, 0, 0, "-", "multipleMinimumSupportBasedFrequentPattern"], [65, 0, 0, "-", "partialPeriodicFrequentPattern"], [67, 0, 0, "-", "partialPeriodicPattern"], [73, 0, 0, "-", "partialPeriodicPatternInMultipleTimeSeries"], [74, 0, 0, "-", "periodicCorrelatedPattern"], [76, 0, 0, "-", "periodicFrequentPattern"], [85, 0, 0, "-", "recurringPattern"], [87, 0, 0, "-", "relativeFrequentPattern"], [89, 0, 0, "-", 
"relativeHighUtilityPattern"], [91, 0, 0, "-", "sequence"], [92, 0, 0, "-", "sequentialPatternMining"], [95, 0, 0, "-", "stablePeriodicFrequentPattern"], [98, 0, 0, "-", "subgraphMining"], [101, 0, 0, "-", "uncertainFaultTolerantFrequentPattern"], [102, 0, 0, "-", "uncertainFrequentPattern"], [104, 0, 0, "-", "uncertainGeoreferencedFrequentPattern"], [106, 0, 0, "-", "uncertainPeriodicFrequentPattern"], [108, 0, 0, "-", "weightedFrequentNeighbourhoodPattern"], [110, 0, 0, "-", "weightedFrequentPattern"], [112, 0, 0, "-", "weightedFrequentRegularPattern"], [114, 0, 0, "-", "weightedUncertainFrequentPattern"]], "PAMI.AssociationRules": [[2, 0, 0, "-", "basic"]], "PAMI.AssociationRules.basic": [[2, 0, 0, "-", "ARWithConfidence"], [2, 0, 0, "-", "ARWithLeverage"], [2, 0, 0, "-", "ARWithLift"], [2, 0, 0, "-", "RuleMiner"], [2, 0, 0, "-", "abstract"]], "PAMI.AssociationRules.basic.ARWithConfidence": [[2, 1, 1, "", "ARWithConfidence"]], "PAMI.AssociationRules.basic.ARWithConfidence.ARWithConfidence": [[2, 2, 1, "", "getMemoryRSS"], [2, 2, 1, "", "getMemoryUSS"], [2, 2, 1, "", "getPatterns"], [2, 2, 1, "", "getPatternsAsDataFrame"], [2, 2, 1, "", "getRuntime"], [2, 2, 1, "", "mine"], [2, 2, 1, "", "printResults"], [2, 2, 1, "", "save"], [2, 2, 1, "", "startMine"]], "PAMI.AssociationRules.basic.ARWithLeverage": [[2, 1, 1, "", "ARWithLeverage"]], "PAMI.AssociationRules.basic.ARWithLeverage.ARWithLeverage": [[2, 2, 1, "", "getMemoryRSS"], [2, 2, 1, "", "getMemoryUSS"], [2, 2, 1, "", "getPatterns"], [2, 2, 1, "", "getPatternsAsDataFrame"], [2, 2, 1, "", "getRuntime"], [2, 2, 1, "", "mine"], [2, 2, 1, "", "printResults"], [2, 2, 1, "", "save"], [2, 2, 1, "", "startMine"]], "PAMI.AssociationRules.basic.ARWithLift": [[2, 1, 1, "", "ARWithLift"], [2, 1, 1, "", "Lift"]], "PAMI.AssociationRules.basic.ARWithLift.ARWithLift": [[2, 2, 1, "", "getMemoryRSS"], [2, 2, 1, "", "getMemoryUSS"], [2, 2, 1, "", "getPatterns"], [2, 2, 1, "", "getPatternsAsDataFrame"], [2, 2, 1, "", "getRuntime"], [2, 2, 1, "", "mine"], [2, 2, 1, "", "printResults"], [2, 2, 1, "", "save"], [2, 2, 1, "", "startMine"]], "PAMI.AssociationRules.basic.ARWithLift.Lift": [[2, 2, 1, "", "run"]], "PAMI.AssociationRules.basic.RuleMiner": [[2, 1, 1, "", "Confidence"], [2, 1, 1, "", "Leverage"], [2, 1, 1, "", "Lift"], [2, 1, 1, "", "RuleMiner"]], "PAMI.AssociationRules.basic.RuleMiner.Confidence": [[2, 2, 1, "", "run"]], "PAMI.AssociationRules.basic.RuleMiner.Leverage": [[2, 2, 1, "", "run"]], "PAMI.AssociationRules.basic.RuleMiner.Lift": [[2, 2, 1, "", "run"]], "PAMI.AssociationRules.basic.RuleMiner.RuleMiner": [[2, 2, 1, "", "getMemoryRSS"], [2, 2, 1, "", "getMemoryUSS"], [2, 2, 1, "", "getPatterns"], [2, 2, 1, "", "getPatternsAsDataFrame"], [2, 2, 1, "", "getRuntime"], [2, 2, 1, "", "mine"], [2, 2, 1, "", "printResults"], [2, 2, 1, "", "save"], [2, 2, 1, "", "startMine"]], "PAMI.correlatedPattern": [[4, 0, 0, "-", "basic"]], "PAMI.correlatedPattern.basic": [[4, 0, 0, "-", "CoMine"], [4, 0, 0, "-", "CoMinePlus"], [4, 0, 0, "-", "abstract"]], "PAMI.correlatedPattern.basic.CoMine": [[4, 1, 1, "", "CoMine"]], "PAMI.correlatedPattern.basic.CoMine.CoMine": [[4, 2, 1, "", "getMemoryRSS"], [4, 2, 1, "", "getMemoryUSS"], [4, 2, 1, "", "getPatterns"], [4, 2, 1, "", "getPatternsAsDataFrame"], [4, 2, 1, "", "getRuntime"], [4, 2, 1, "", "mine"], [4, 2, 1, "", "printResults"], [4, 2, 1, "", "save"], [4, 2, 1, "", "startMine"]], "PAMI.correlatedPattern.basic.CoMinePlus": [[4, 1, 1, "", "CoMinePlus"]], "PAMI.correlatedPattern.basic.CoMinePlus.CoMinePlus": [[4, 
2, 1, "", "getMemoryRSS"], [4, 2, 1, "", "getMemoryUSS"], [4, 2, 1, "", "getPatterns"], [4, 2, 1, "", "getPatternsAsDataFrame"], [4, 2, 1, "", "getRuntime"], [4, 2, 1, "", "mine"], [4, 2, 1, "", "printResults"], [4, 2, 1, "", "save"], [4, 2, 1, "", "startMine"]], "PAMI.coveragePattern": [[6, 0, 0, "-", "basic"]], "PAMI.coveragePattern.basic": [[6, 0, 0, "-", "CMine"], [6, 0, 0, "-", "CPPG"], [6, 0, 0, "-", "abstract"]], "PAMI.coveragePattern.basic.CMine": [[6, 1, 1, "", "CMine"]], "PAMI.coveragePattern.basic.CMine.CMine": [[6, 2, 1, "", "creatingCoverageItems"], [6, 2, 1, "", "genPatterns"], [6, 2, 1, "", "generateAllPatterns"], [6, 2, 1, "", "getMemoryRSS"], [6, 2, 1, "", "getMemoryUSS"], [6, 2, 1, "", "getPatterns"], [6, 2, 1, "", "getPatternsAsDataFrame"], [6, 2, 1, "", "getRuntime"], [6, 2, 1, "", "mine"], [6, 2, 1, "", "printResults"], [6, 2, 1, "", "save"], [6, 2, 1, "", "startMine"], [6, 2, 1, "", "tidToBitset"]], "PAMI.coveragePattern.basic.CPPG": [[6, 1, 1, "", "CPPG"]], "PAMI.coveragePattern.basic.CPPG.CPPG": [[6, 2, 1, "", "getMemoryRSS"], [6, 2, 1, "", "getMemoryUSS"], [6, 2, 1, "", "getPatterns"], [6, 2, 1, "", "getPatternsAsDataFrame"], [6, 2, 1, "", "getRuntime"], [6, 2, 1, "", "mine"], [6, 2, 1, "", "printResults"], [6, 2, 1, "", "save"], [6, 2, 1, "", "startMine"]], "PAMI.extras": [[8, 0, 0, "-", "DF2DB"], [9, 0, 0, "-", "calculateMISValues"], [10, 0, 0, "-", "dbStats"], [11, 0, 0, "-", "fuzzyTransformation"], [12, 0, 0, "-", "generateDatabase"], [7, 0, 0, "-", "generateLatexGraphFile"], [13, 0, 0, "-", "graph"], [14, 0, 0, "-", "image2Database"], [15, 0, 0, "-", "imageProcessing"], [16, 0, 0, "-", "messaging"], [17, 0, 0, "-", "neighbours"], [7, 0, 0, "-", "plotPointOnMap"], [7, 0, 0, "-", "plotPointOnMap_dump"], [18, 0, 0, "-", "sampleDatasets"], [7, 0, 0, "-", "scatterPlotSpatialPoints"], [19, 0, 0, "-", "stats"], [20, 0, 0, "-", "syntheticDataGenerator"], [7, 0, 0, "-", "topKPatterns"], [7, 0, 0, "-", "uncertaindb_convert"], [21, 0, 0, "-", "visualize"]], "PAMI.extras.DF2DB": [[8, 0, 0, "-", "DF2DB"], [8, 0, 0, "-", "DenseFormatDF"], [8, 0, 0, "-", "SparseFormatDF"], [8, 0, 0, "-", "createTDB"], [8, 0, 0, "-", "denseDF2DBPlus"], [8, 0, 0, "-", "denseDF2DB_dump"], [8, 0, 0, "-", "sparseDF2DBPlus"]], "PAMI.extras.DF2DB.DF2DB": [[8, 1, 1, "", "DF2DB"]], "PAMI.extras.DF2DB.DF2DB.DF2DB": [[8, 2, 1, "", "getTemporalDatabase"], [8, 2, 1, "", "getTransactionalDatabase"], [8, 2, 1, "", "getUtilityDatabase"]], "PAMI.extras.DF2DB.DenseFormatDF": [[8, 1, 1, "", "DenseFormatDF"]], "PAMI.extras.DF2DB.DenseFormatDF.DenseFormatDF": [[8, 2, 1, "", "convert2MultipleTimeSeries"], [8, 2, 1, "", "convert2TemporalDatabase"], [8, 2, 1, "", "convert2TransactionalDatabase"], [8, 2, 1, "", "convert2UncertainTransactional"], [8, 2, 1, "", "convert2UtilityDatabase"], [8, 2, 1, "", "getFileName"]], "PAMI.extras.DF2DB.SparseFormatDF": [[8, 1, 1, "", "SparseFormatDF"]], "PAMI.extras.DF2DB.SparseFormatDF.SparseFormatDF": [[8, 2, 1, "", "createTemporal"], [8, 2, 1, "", "createTransactional"], [8, 2, 1, "", "createUtility"], [8, 2, 1, "", "getFileName"]], "PAMI.extras.DF2DB.createTDB": [[8, 1, 1, "", "createTDB"]], "PAMI.extras.DF2DB.createTDB.createTDB": [[8, 2, 1, "", "createTDB"], [8, 2, 1, "", "save"]], "PAMI.extras.DF2DB.denseDF2DBPlus": [[8, 1, 1, "", "DenseFormatDFPlus"]], "PAMI.extras.DF2DB.denseDF2DBPlus.DenseFormatDFPlus": [[8, 2, 1, "", "createTemporal"], [8, 2, 1, "", "createTransactional"], [8, 2, 1, "", "createUtility"], [8, 2, 1, "", "getFileName"]], "PAMI.extras.DF2DB.denseDF2DB_dump": 
[[8, 1, 1, "", "DenseFormatDF"]], "PAMI.extras.DF2DB.denseDF2DB_dump.DenseFormatDF": [[8, 2, 1, "", "createTemporal"], [8, 2, 1, "", "createTransactional"], [8, 2, 1, "", "createUtility"], [8, 2, 1, "", "getFileName"]], "PAMI.extras.DF2DB.sparseDF2DBPlus": [[8, 1, 1, "", "SparseFormatDFPlus"]], "PAMI.extras.DF2DB.sparseDF2DBPlus.SparseFormatDFPlus": [[8, 2, 1, "", "createTemporal"], [8, 2, 1, "", "createTransactional"], [8, 2, 1, "", "createUtility"], [8, 2, 1, "", "getFileName"]], "PAMI.extras.calculateMISValues": [[9, 0, 0, "-", "usingBeta"], [9, 0, 0, "-", "usingSD"]], "PAMI.extras.calculateMISValues.usingBeta": [[9, 1, 1, "", "usingBeta"]], "PAMI.extras.calculateMISValues.usingBeta.usingBeta": [[9, 2, 1, "", "calculateMIS"], [9, 2, 1, "", "getMISDataFrame"], [9, 2, 1, "", "save"]], "PAMI.extras.calculateMISValues.usingSD": [[9, 1, 1, "", "usingSD"]], "PAMI.extras.calculateMISValues.usingSD.usingSD": [[9, 2, 1, "", "calculateMIS"], [9, 2, 1, "", "getDataFrame"], [9, 2, 1, "", "save"]], "PAMI.extras.dbStats": [[10, 0, 0, "-", "FuzzyDatabase"], [10, 0, 0, "-", "MultipleTimeSeriesFuzzyDatabaseStats"], [10, 0, 0, "-", "SequentialDatabase"], [10, 0, 0, "-", "TemporalDatabase"], [10, 0, 0, "-", "TransactionalDatabase"], [10, 0, 0, "-", "UncertainTemporalDatabase"], [10, 0, 0, "-", "UncertainTransactionalDatabase"], [10, 0, 0, "-", "UtilityDatabase"]], "PAMI.extras.dbStats.FuzzyDatabase": [[10, 1, 1, "", "FuzzyDatabase"]], "PAMI.extras.dbStats.FuzzyDatabase.FuzzyDatabase": [[10, 2, 1, "", "creatingItemSets"], [10, 2, 1, "", "getAverageTransactionLength"], [10, 2, 1, "", "getAverageUtility"], [10, 2, 1, "", "getDatabaseSize"], [10, 2, 1, "", "getFrequenciesInRange"], [10, 2, 1, "", "getMaximumTransactionLength"], [10, 2, 1, "", "getMaximumUtility"], [10, 2, 1, "", "getMinimumTransactionLength"], [10, 2, 1, "", "getMinimumUtility"], [10, 2, 1, "", "getNumberOfItems"], [10, 2, 1, "", "getSortedListOfItemFrequencies"], [10, 2, 1, "", "getSortedUtilityValuesOfItem"], [10, 2, 1, "", "getSparsity"], [10, 2, 1, "", "getStandardDeviationTransactionLength"], [10, 2, 1, "", "getTotalNumberOfItems"], [10, 2, 1, "", "getTotalUtility"], [10, 2, 1, "", "getTransanctionalLengthDistribution"], [10, 2, 1, "", "getVarianceTransactionLength"], [10, 2, 1, "", "plotGraphs"], [10, 2, 1, "", "printStats"], [10, 2, 1, "", "readDatabase"], [10, 2, 1, "", "run"], [10, 2, 1, "", "save"]], "PAMI.extras.dbStats.MultipleTimeSeriesFuzzyDatabaseStats": [[10, 1, 1, "", "MultipleTimeSeriesFuzzyDatabaseStats"]], "PAMI.extras.dbStats.MultipleTimeSeriesFuzzyDatabaseStats.MultipleTimeSeriesFuzzyDatabaseStats": [[10, 2, 1, "", "convertDataIntoMatrix"], [10, 2, 1, "", "getAverageTransactionLength"], [10, 2, 1, "", "getDatabaseSize"], [10, 2, 1, "", "getDensity"], [10, 2, 1, "", "getFrequenciesInRange"], [10, 2, 1, "", "getMaximumTransactionLength"], [10, 2, 1, "", "getMinimumTransactionLength"], [10, 2, 1, "", "getNumberOfItems"], [10, 2, 1, "", "getSortedListOfItemFrequencies"], [10, 2, 1, "", "getSparsity"], [10, 2, 1, "", "getStandardDeviationTransactionLength"], [10, 2, 1, "", "getTotalNumberOfItems"], [10, 2, 1, "", "getTransanctionalLengthDistribution"], [10, 2, 1, "", "getVarianceTransactionLength"], [10, 2, 1, "", "plotGraphs"], [10, 2, 1, "", "printStats"], [10, 2, 1, "", "readDatabase"], [10, 2, 1, "", "run"], [10, 2, 1, "", "save"]], "PAMI.extras.dbStats.SequentialDatabase": [[10, 1, 1, "", "SequentialDatabase"]], "PAMI.extras.dbStats.SequentialDatabase.SequentialDatabase": [[10, 2, 1, "", 
"getAverageItemPerSequenceLength"], [10, 2, 1, "", "getAverageItemPerSubsequenceLength"], [10, 2, 1, "", "getAverageSubsequencePerSequenceLength"], [10, 2, 1, "", "getDatabaseSize"], [10, 2, 1, "", "getFrequenciesInRange"], [10, 2, 1, "", "getMaximumSequenceLength"], [10, 2, 1, "", "getMaximumSubsequenceLength"], [10, 2, 1, "", "getMinimumSequenceLength"], [10, 2, 1, "", "getMinimumSubsequenceLength"], [10, 2, 1, "", "getSequenceSize"], [10, 2, 1, "", "getSequencialLengthDistribution"], [10, 2, 1, "", "getSortedListOfItemFrequencies"], [10, 2, 1, "", "getStandardDeviationSequenceLength"], [10, 2, 1, "", "getStandardDeviationSubsequenceLength"], [10, 2, 1, "", "getSubsequencialLengthDistribution"], [10, 2, 1, "", "getTotalNumberOfItems"], [10, 2, 1, "", "getVarianceSequenceLength"], [10, 2, 1, "", "getVarianceSubsequenceLength"], [10, 2, 1, "", "plotGraphs"], [10, 2, 1, "", "printStats"], [10, 2, 1, "", "readDatabase"], [10, 2, 1, "", "run"]], "PAMI.extras.dbStats.TemporalDatabase": [[10, 1, 1, "", "TemporalDatabase"]], "PAMI.extras.dbStats.TemporalDatabase.TemporalDatabase": [[10, 2, 1, "", "convertDataIntoMatrix"], [10, 2, 1, "", "getAverageInterArrivalPeriod"], [10, 2, 1, "", "getAveragePeriodOfItem"], [10, 2, 1, "", "getAverageTransactionLength"], [10, 2, 1, "", "getDatabaseSize"], [10, 2, 1, "", "getDensity"], [10, 2, 1, "", "getFrequenciesInRange"], [10, 2, 1, "", "getMaximumInterArrivalPeriod"], [10, 2, 1, "", "getMaximumPeriodOfItem"], [10, 2, 1, "", "getMaximumTransactionLength"], [10, 2, 1, "", "getMinimumInterArrivalPeriod"], [10, 2, 1, "", "getMinimumPeriodOfItem"], [10, 2, 1, "", "getMinimumTransactionLength"], [10, 2, 1, "", "getNumberOfTransactionsPerTimestamp"], [10, 2, 1, "", "getPeriodsInRange"], [10, 2, 1, "", "getSortedListOfItemFrequencies"], [10, 2, 1, "", "getSparsity"], [10, 2, 1, "", "getStandardDeviationPeriod"], [10, 2, 1, "", "getStandardDeviationTransactionLength"], [10, 2, 1, "", "getTotalNumberOfItems"], [10, 2, 1, "", "getTransanctionalLengthDistribution"], [10, 2, 1, "", "getVarianceTransactionLength"], [10, 2, 1, "", "plotGraphs"], [10, 2, 1, "", "printStats"], [10, 2, 1, "", "readDatabase"], [10, 2, 1, "", "run"], [10, 2, 1, "", "save"]], "PAMI.extras.dbStats.TransactionalDatabase": [[10, 1, 1, "", "TransactionalDatabase"]], "PAMI.extras.dbStats.TransactionalDatabase.TransactionalDatabase": [[10, 2, 1, "", "convertDataIntoMatrix"], [10, 2, 1, "", "getAverageTransactionLength"], [10, 2, 1, "", "getDatabaseSize"], [10, 2, 1, "", "getDensity"], [10, 2, 1, "", "getFrequenciesInRange"], [10, 2, 1, "", "getMaximumTransactionLength"], [10, 2, 1, "", "getMinimumTransactionLength"], [10, 2, 1, "", "getNumberOfItems"], [10, 2, 1, "", "getSortedListOfItemFrequencies"], [10, 2, 1, "", "getSparsity"], [10, 2, 1, "", "getStandardDeviationTransactionLength"], [10, 2, 1, "", "getTotalNumberOfItems"], [10, 2, 1, "", "getTransanctionalLengthDistribution"], [10, 2, 1, "", "getVarianceTransactionLength"], [10, 2, 1, "", "plotGraphs"], [10, 2, 1, "", "printStats"], [10, 2, 1, "", "readDatabase"], [10, 2, 1, "", "run"], [10, 2, 1, "", "save"]], "PAMI.extras.dbStats.UncertainTemporalDatabase": [[10, 1, 1, "", "UncertainTemporalDatabase"]], "PAMI.extras.dbStats.UncertainTemporalDatabase.UncertainTemporalDatabase": [[10, 2, 1, "", "convertDataIntoMatrix"], [10, 2, 1, "", "getAveragePeriod"], [10, 2, 1, "", "getAverageTransactionLength"], [10, 2, 1, "", "getDatabaseSize"], [10, 2, 1, "", "getDensity"], [10, 2, 1, "", "getFrequenciesInRange"], [10, 2, 1, "", "getMaximumPeriod"], 
[10, 2, 1, "", "getMaximumTransactionLength"], [10, 2, 1, "", "getMinimumPeriod"], [10, 2, 1, "", "getMinimumTransactionLength"], [10, 2, 1, "", "getNumberOfTransactionsPerTimestamp"], [10, 2, 1, "", "getSortedListOfItemFrequencies"], [10, 2, 1, "", "getSparsity"], [10, 2, 1, "", "getStandardDeviationPeriod"], [10, 2, 1, "", "getStandardDeviationTransactionLength"], [10, 2, 1, "", "getTotalNumberOfItems"], [10, 2, 1, "", "getTransanctionalLengthDistribution"], [10, 2, 1, "", "getVarianceTransactionLength"], [10, 2, 1, "", "plotGraphs"], [10, 2, 1, "", "printStats"], [10, 2, 1, "", "readDatabase"], [10, 2, 1, "", "run"], [10, 2, 1, "", "save"]], "PAMI.extras.dbStats.UncertainTransactionalDatabase": [[10, 1, 1, "", "UncertainTransactionalDatabase"]], "PAMI.extras.dbStats.UncertainTransactionalDatabase.UncertainTransactionalDatabase": [[10, 2, 1, "", "convertDataIntoMatrix"], [10, 2, 1, "", "getAverageTransactionLength"], [10, 2, 1, "", "getDatabaseSize"], [10, 2, 1, "", "getDensity"], [10, 2, 1, "", "getFrequenciesInRange"], [10, 2, 1, "", "getMaximumTransactionLength"], [10, 2, 1, "", "getMinimumTransactionLength"], [10, 2, 1, "", "getNumberOfItems"], [10, 2, 1, "", "getSortedListOfItemFrequencies"], [10, 2, 1, "", "getSparsity"], [10, 2, 1, "", "getStandardDeviationTransactionLength"], [10, 2, 1, "", "getTotalNumberOfItems"], [10, 2, 1, "", "getTransanctionalLengthDistribution"], [10, 2, 1, "", "getVarianceTransactionLength"], [10, 2, 1, "", "plotGraphs"], [10, 2, 1, "", "printStats"], [10, 2, 1, "", "readDatabase"], [10, 2, 1, "", "run"], [10, 2, 1, "", "save"]], "PAMI.extras.dbStats.UtilityDatabase": [[10, 1, 1, "", "UtilityDatabase"]], "PAMI.extras.dbStats.UtilityDatabase.UtilityDatabase": [[10, 2, 1, "", "creatingItemSets"], [10, 2, 1, "", "getAverageTransactionLength"], [10, 2, 1, "", "getAverageUtility"], [10, 2, 1, "", "getDatabaseSize"], [10, 2, 1, "", "getFrequenciesInRange"], [10, 2, 1, "", "getMaximumTransactionLength"], [10, 2, 1, "", "getMaximumUtility"], [10, 2, 1, "", "getMinimumTransactionLength"], [10, 2, 1, "", "getMinimumUtility"], [10, 2, 1, "", "getNumberOfItems"], [10, 2, 1, "", "getSortedListOfItemFrequencies"], [10, 2, 1, "", "getSortedUtilityValuesOfItem"], [10, 2, 1, "", "getSparsity"], [10, 2, 1, "", "getStandardDeviationTransactionLength"], [10, 2, 1, "", "getTotalNumberOfItems"], [10, 2, 1, "", "getTotalUtility"], [10, 2, 1, "", "getTransanctionalLengthDistribution"], [10, 2, 1, "", "getVarianceTransactionLength"], [10, 2, 1, "", "plotGraphs"], [10, 2, 1, "", "printStats"], [10, 2, 1, "", "readDatabase"], [10, 2, 1, "", "run"], [10, 2, 1, "", "save"]], "PAMI.extras.fuzzyTransformation": [[11, 0, 0, "-", "abstract"], [11, 0, 0, "-", "temporalToFuzzy"], [11, 0, 0, "-", "transactionalToFuzzy"]], "PAMI.extras.fuzzyTransformation.temporalToFuzzy": [[11, 1, 1, "", "temporalToFuzzy"]], "PAMI.extras.fuzzyTransformation.temporalToFuzzy.temporalToFuzzy": [[11, 2, 1, "", "startConvert"]], "PAMI.extras.fuzzyTransformation.transactionalToFuzzy": [[11, 1, 1, "", "transactionalToFuzzy"]], "PAMI.extras.fuzzyTransformation.transactionalToFuzzy.transactionalToFuzzy": [[11, 2, 1, "", "startConvert"]], "PAMI.extras.generateDatabase": [[12, 0, 0, "-", "generateSpatioTemporalDatabase"], [12, 0, 0, "-", "generateTemporalDatabase"], [12, 0, 0, "-", "generateTransactionalDatabase"]], "PAMI.extras.generateDatabase.generateSpatioTemporalDatabase": [[12, 1, 1, "", "spatioTemporalDatabaseGenerator"]], 
"PAMI.extras.generateDatabase.generateSpatioTemporalDatabase.spatioTemporalDatabaseGenerator": [[12, 3, 1, "", "alreadyAdded"], [12, 3, 1, "", "coinFlip"], [12, 2, 1, "", "createPoint"], [12, 3, 1, "", "items"], [12, 3, 1, "", "outFileName"], [12, 2, 1, "", "saveAsFile"], [12, 3, 1, "", "timestamp"]], "PAMI.extras.generateDatabase.generateTemporalDatabase": [[12, 1, 1, "", "generateTemporalDatabase"]], "PAMI.extras.generateDatabase.generateTemporalDatabase.generateTemporalDatabase": [[12, 2, 1, "", "createTemporalFile"], [12, 2, 1, "", "getDatabaseAsDataFrame"], [12, 2, 1, "", "getFileName"], [12, 2, 1, "", "performCoinFlip"], [12, 2, 1, "", "tuning"]], "PAMI.extras.generateDatabase.generateTransactionalDatabase": [[12, 1, 1, "", "generateTransactionalDatabase"]], "PAMI.extras.generateDatabase.generateTransactionalDatabase.generateTransactionalDatabase": [[12, 2, 1, "", "create"], [12, 2, 1, "", "generateArray"], [12, 2, 1, "", "getTransactions"], [12, 2, 1, "", "save"], [12, 2, 1, "", "tuning"]], "PAMI.extras.generateLatexGraphFile": [[7, 4, 1, "", "generateLatexCode"], [7, 1, 1, "", "generateLatexGraphFile"]], "PAMI.extras.graph": [[13, 0, 0, "-", "DF2Fig"], [13, 0, 0, "-", "plotLineGraphFromDictionary"], [13, 0, 0, "-", "plotLineGraphsFromDataFrame"], [13, 0, 0, "-", "visualizeFuzzyPatterns"], [13, 0, 0, "-", "visualizePatterns"]], "PAMI.extras.graph.DF2Fig": [[13, 1, 1, "", "DF2Fig"]], "PAMI.extras.graph.DF2Fig.DF2Fig": [[13, 2, 1, "", "plot"]], "PAMI.extras.graph.plotLineGraphFromDictionary": [[13, 1, 1, "", "plotLineGraphFromDictionary"]], "PAMI.extras.graph.plotLineGraphsFromDataFrame": [[13, 1, 1, "", "plotGraphsFromDataFrame"]], "PAMI.extras.graph.plotLineGraphsFromDataFrame.plotGraphsFromDataFrame": [[13, 2, 1, "", "plotGraphsFromDataFrame"]], "PAMI.extras.graph.visualizeFuzzyPatterns": [[13, 1, 1, "", "visualizeFuzzyPatterns"]], "PAMI.extras.graph.visualizeFuzzyPatterns.visualizeFuzzyPatterns": [[13, 2, 1, "", "visualize"]], "PAMI.extras.graph.visualizePatterns": [[13, 1, 1, "", "visualizePatterns"]], "PAMI.extras.graph.visualizePatterns.visualizePatterns": [[13, 2, 1, "", "visualize"]], "PAMI.extras.imageProcessing": [[15, 0, 0, "-", "imagery2Databases"]], "PAMI.extras.imageProcessing.imagery2Databases": [[15, 1, 1, "", "createDatabase"]], "PAMI.extras.imageProcessing.imagery2Databases.createDatabase": [[15, 2, 1, "", "getDataFrame"], [15, 2, 1, "", "saveAsTemporalDB"], [15, 2, 1, "", "saveAsTransactionalDB"], [15, 2, 1, "", "saveAsUncertainTemporalDB"], [15, 2, 1, "", "saveAsUncertainTransactionalDB"], [15, 2, 1, "", "saveAsUtilityTemporalDB"], [15, 2, 1, "", "saveAsUtilityTransactionalDB"]], "PAMI.extras.messaging": [[16, 0, 0, "-", "discord"], [16, 0, 0, "-", "gmail"]], "PAMI.extras.messaging.discord": [[16, 1, 1, "", "discord"]], "PAMI.extras.messaging.discord.discord": [[16, 2, 1, "", "send"]], "PAMI.extras.messaging.gmail": [[16, 1, 1, "", "gmail"]], "PAMI.extras.messaging.gmail.gmail": [[16, 2, 1, "", "send"]], "PAMI.extras.neighbours": [[17, 0, 0, "-", "findNeighborsUsingEuclideanDistanceforPointInfo"], [17, 0, 0, "-", "findNeighboursUsingEuclidean"], [17, 0, 0, "-", "findNeighboursUsingGeodesic"]], "PAMI.extras.neighbours.findNeighborsUsingEuclideanDistanceforPointInfo": [[17, 1, 1, "", "createNeighborhoodFileUsingEuclideanDistance"]], "PAMI.extras.neighbours.findNeighborsUsingEuclideanDistanceforPointInfo.createNeighborhoodFileUsingEuclideanDistance": [[17, 2, 1, "", "getFileName"]], "PAMI.extras.neighbours.findNeighboursUsingEuclidean": [[17, 1, 1, "", 
"createNeighborhoodFileUsingEuclideanDistance"]], "PAMI.extras.neighbours.findNeighboursUsingEuclidean.createNeighborhoodFileUsingEuclideanDistance": [[17, 2, 1, "", "getFileName"]], "PAMI.extras.neighbours.findNeighboursUsingGeodesic": [[17, 1, 1, "", "createNeighborhoodFileUsingGeodesicDistance"]], "PAMI.extras.neighbours.findNeighboursUsingGeodesic.createNeighborhoodFileUsingGeodesicDistance": [[17, 2, 1, "", "getFileName"]], "PAMI.extras.plotPointOnMap": [[7, 1, 1, "", "plotPointOnMap"]], "PAMI.extras.plotPointOnMap.plotPointOnMap": [[7, 2, 1, "", "convertPOINT"], [7, 2, 1, "", "findTopKPatterns"], [7, 2, 1, "", "plotPointInMap"]], "PAMI.extras.plotPointOnMap_dump": [[7, 1, 1, "", "plotPointOnMap"]], "PAMI.extras.plotPointOnMap_dump.plotPointOnMap": [[7, 2, 1, "", "convertPOINT"], [7, 2, 1, "", "findTopKPatterns"], [7, 2, 1, "", "plotPointInMap"]], "PAMI.extras.scatterPlotSpatialPoints": [[7, 1, 1, "", "scatterPlotSpatialPoints"]], "PAMI.extras.scatterPlotSpatialPoints.scatterPlotSpatialPoints": [[7, 2, 1, "", "scatterPlotSpatialPoints"]], "PAMI.extras.stats": [[19, 0, 0, "-", "TransactionalDatabase"], [19, 0, 0, "-", "graphDatabase"], [19, 0, 0, "-", "sequentialDatabase"], [19, 0, 0, "-", "temporalDatabase"], [19, 0, 0, "-", "utilityDatabase"]], "PAMI.extras.stats.TransactionalDatabase": [[19, 1, 1, "", "TransactionalDatabase"]], "PAMI.extras.stats.TransactionalDatabase.TransactionalDatabase": [[19, 2, 1, "", "convertDataIntoMatrix"], [19, 2, 1, "", "getAverageTransactionLength"], [19, 2, 1, "", "getDatabaseSize"], [19, 2, 1, "", "getDensity"], [19, 2, 1, "", "getFrequenciesInRange"], [19, 2, 1, "", "getMaximumTransactionLength"], [19, 2, 1, "", "getMinimumTransactionLength"], [19, 2, 1, "", "getNumberOfItems"], [19, 2, 1, "", "getSortedListOfItemFrequencies"], [19, 2, 1, "", "getSparsity"], [19, 2, 1, "", "getStandardDeviationTransactionLength"], [19, 2, 1, "", "getTotalNumberOfItems"], [19, 2, 1, "", "getTransanctionalLengthDistribution"], [19, 2, 1, "", "getVarianceTransactionLength"], [19, 2, 1, "", "plotGraphs"], [19, 2, 1, "", "printStats"], [19, 2, 1, "", "run"], [19, 2, 1, "", "save"]], "PAMI.extras.stats.graphDatabase": [[19, 1, 1, "", "graphDatabase"]], "PAMI.extras.stats.graphDatabase.graphDatabase": [[19, 2, 1, "", "plotEdgeDistribution"], [19, 2, 1, "", "plotNodeDistribution"], [19, 2, 1, "", "printGraphDatabaseStatistics"], [19, 2, 1, "", "printIndividualGraphStats"]], "PAMI.extras.stats.sequentialDatabase": [[19, 1, 1, "", "sequentialDatabase"]], "PAMI.extras.stats.sequentialDatabase.sequentialDatabase": [[19, 2, 1, "", "getAverageItemPerSequenceLength"], [19, 2, 1, "", "getAverageItemPerSubsequenceLength"], [19, 2, 1, "", "getAverageSubsequencePerSequenceLength"], [19, 2, 1, "", "getDatabaseSize"], [19, 2, 1, "", "getFrequenciesInRange"], [19, 2, 1, "", "getMaximumSequenceLength"], [19, 2, 1, "", "getMaximumSubsequenceLength"], [19, 2, 1, "", "getMinimumSequenceLength"], [19, 2, 1, "", "getMinimumSubsequenceLength"], [19, 2, 1, "", "getSequenceSize"], [19, 2, 1, "", "getSequencialLengthDistribution"], [19, 2, 1, "", "getSortedListOfItemFrequencies"], [19, 2, 1, "", "getStandardDeviationSequenceLength"], [19, 2, 1, "", "getStandardDeviationSubsequenceLength"], [19, 2, 1, "", "getSubsequencialLengthDistribution"], [19, 2, 1, "", "getTotalNumberOfItems"], [19, 2, 1, "", "getVarianceSequenceLength"], [19, 2, 1, "", "getVarianceSubsequenceLength"], [19, 2, 1, "", "plotGraphs"], [19, 2, 1, "", "printStats"], [19, 2, 1, "", "readDatabase"], [19, 2, 1, "", "run"]], 
"PAMI.extras.stats.temporalDatabase": [[19, 1, 1, "", "temporalDatabase"]], "PAMI.extras.stats.temporalDatabase.temporalDatabase": [[19, 2, 1, "", "convertDataIntoMatrix"], [19, 2, 1, "", "getAverageInterArrivalPeriod"], [19, 2, 1, "", "getAveragePeriodOfItem"], [19, 2, 1, "", "getAverageTransactionLength"], [19, 2, 1, "", "getDatabaseSize"], [19, 2, 1, "", "getDensity"], [19, 2, 1, "", "getFrequenciesInRange"], [19, 2, 1, "", "getMaximumInterArrivalPeriod"], [19, 2, 1, "", "getMaximumPeriodOfItem"], [19, 2, 1, "", "getMaximumTransactionLength"], [19, 2, 1, "", "getMinimumInterArrivalPeriod"], [19, 2, 1, "", "getMinimumPeriodOfItem"], [19, 2, 1, "", "getMinimumTransactionLength"], [19, 2, 1, "", "getNumberOfTransactionsPerTimestamp"], [19, 2, 1, "", "getPeriodsInRange"], [19, 2, 1, "", "getSortedListOfItemFrequencies"], [19, 2, 1, "", "getSparsity"], [19, 2, 1, "", "getStandardDeviationPeriod"], [19, 2, 1, "", "getStandardDeviationTransactionLength"], [19, 2, 1, "", "getTotalNumberOfItems"], [19, 2, 1, "", "getTransanctionalLengthDistribution"], [19, 2, 1, "", "getVarianceTransactionLength"], [19, 2, 1, "", "plotGraphs"], [19, 2, 1, "", "printStats"], [19, 2, 1, "", "readDatabase"], [19, 2, 1, "", "run"], [19, 2, 1, "", "save"]], "PAMI.extras.stats.utilityDatabase": [[19, 1, 1, "", "utilityDatabase"]], "PAMI.extras.stats.utilityDatabase.utilityDatabase": [[19, 2, 1, "", "creatingItemSets"], [19, 2, 1, "", "getAverageTransactionLength"], [19, 2, 1, "", "getAverageUtility"], [19, 2, 1, "", "getDatabaseSize"], [19, 2, 1, "", "getFrequenciesInRange"], [19, 2, 1, "", "getMaximumTransactionLength"], [19, 2, 1, "", "getMaximumUtility"], [19, 2, 1, "", "getMinimumTransactionLength"], [19, 2, 1, "", "getMinimumUtility"], [19, 2, 1, "", "getNumberOfItems"], [19, 2, 1, "", "getSortedListOfItemFrequencies"], [19, 2, 1, "", "getSortedUtilityValuesOfItem"], [19, 2, 1, "", "getSparsity"], [19, 2, 1, "", "getStandardDeviationTransactionLength"], [19, 2, 1, "", "getTotalNumberOfItems"], [19, 2, 1, "", "getTotalUtility"], [19, 2, 1, "", "getTransanctionalLengthDistribution"], [19, 2, 1, "", "getVarianceTransactionLength"], [19, 2, 1, "", "plotGraphs"], [19, 2, 1, "", "printStats"], [19, 2, 1, "", "readDatabase"], [19, 2, 1, "", "run"], [19, 2, 1, "", "save"]], "PAMI.extras.syntheticDataGenerator": [[20, 0, 0, "-", "TemporalDatabase"], [20, 0, 0, "-", "TransactionalDatabase"], [20, 0, 0, "-", "createSyntheticGeoreferentialTemporal"], [20, 0, 0, "-", "createSyntheticGeoreferentialTransactions"], [20, 0, 0, "-", "createSyntheticGeoreferentialUncertainTransaction"], [20, 0, 0, "-", "createSyntheticTemporal"], [20, 0, 0, "-", "createSyntheticTransactions"], [20, 0, 0, "-", "createSyntheticUncertainTemporal"], [20, 0, 0, "-", "createSyntheticUncertainTransactions"], [20, 0, 0, "-", "createSyntheticUtility"], [20, 0, 0, "-", "fuzzyDatabase"], [20, 0, 0, "-", "generateTemporal"], [20, 0, 0, "-", "generateTransactional"], [20, 0, 0, "-", "generateUncertainTemporal"], [20, 0, 0, "-", "generateUncertainTransactional"], [20, 0, 0, "-", "generateUtilityTemporal"], [20, 0, 0, "-", "generateUtilityTransactional"], [20, 0, 0, "-", "georeferencedTemporalDatabase"], [20, 0, 0, "-", "georeferencedTransactionalDatabase"], [20, 0, 0, "-", "syntheticUtilityDatabase"], [20, 0, 0, "-", "temporalDatabaseGen"], [20, 0, 0, "-", "utilityDatabase"]], "PAMI.extras.syntheticDataGenerator.TemporalDatabase": [[20, 1, 1, "", "TemporalDatabase"]], "PAMI.extras.syntheticDataGenerator.TemporalDatabase.TemporalDatabase": [[20, 2, 1, "", 
"create"], [20, 2, 1, "", "getDatabaseAsDataFrame"], [20, 2, 1, "", "getFileName"], [20, 2, 1, "", "performCoinFlip"], [20, 2, 1, "", "tuning"]], "PAMI.extras.syntheticDataGenerator.TransactionalDatabase": [[20, 1, 1, "", "TransactionalDatabase"]], "PAMI.extras.syntheticDataGenerator.TransactionalDatabase.TransactionalDatabase": [[20, 2, 1, "", "create"], [20, 2, 1, "", "generateArray"], [20, 2, 1, "", "getTransactions"], [20, 2, 1, "", "save"], [20, 2, 1, "", "tuning"]], "PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialTemporal": [[20, 1, 1, "", "createGeoreferentialTemporalDatabase"]], "PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialTemporal.createGeoreferentialTemporalDatabase": [[20, 2, 1, "", "createGeoreferentialTemporalDatabase"]], "PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialTransactions": [[20, 1, 1, "", "createSyntheticGeoreferentialTransaction"]], "PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialTransactions.createSyntheticGeoreferentialTransaction": [[20, 2, 1, "", "createGeoreferentialTransactionalDatabase"]], "PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialUncertainTransaction": [[20, 1, 1, "", "createSyntheticGeoreferentialUncertainTransaction"]], "PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialUncertainTransaction.createSyntheticGeoreferentialUncertainTransaction": [[20, 2, 1, "", "createGeoreferentialUncertainTransactionalDatabase"]], "PAMI.extras.syntheticDataGenerator.createSyntheticTemporal": [[20, 1, 1, "", "createSyntheticTemporal"]], "PAMI.extras.syntheticDataGenerator.createSyntheticTemporal.createSyntheticTemporal": [[20, 2, 1, "", "createTemporalDatabase"]], "PAMI.extras.syntheticDataGenerator.createSyntheticTransactions": [[20, 1, 1, "", "createSyntheticTransaction"]], "PAMI.extras.syntheticDataGenerator.createSyntheticTransactions.createSyntheticTransaction": [[20, 2, 1, "", "createTransactionalDatabase"]], "PAMI.extras.syntheticDataGenerator.createSyntheticUncertainTemporal": [[20, 1, 1, "", "createSyntheticUncertainTemporal"]], "PAMI.extras.syntheticDataGenerator.createSyntheticUncertainTemporal.createSyntheticUncertainTemporal": [[20, 2, 1, "", "createUncertainTemporalDatabase"]], "PAMI.extras.syntheticDataGenerator.createSyntheticUncertainTransactions": [[20, 1, 1, "", "createSyntheticUncertainTransaction"]], "PAMI.extras.syntheticDataGenerator.createSyntheticUncertainTransactions.createSyntheticUncertainTransaction": [[20, 2, 1, "", "createUncertainTransactionalDatabase"]], "PAMI.extras.syntheticDataGenerator.createSyntheticUtility": [[20, 1, 1, "", "createSyntheticUtility"]], "PAMI.extras.syntheticDataGenerator.createSyntheticUtility.createSyntheticUtility": [[20, 2, 1, "", "createUtilityDatabase"]], "PAMI.extras.syntheticDataGenerator.generateTemporal": [[20, 1, 1, "", "generateTemporal"]], "PAMI.extras.syntheticDataGenerator.generateTemporal.generateTemporal": [[20, 2, 1, "", "generate"], [20, 2, 1, "", "save"]], "PAMI.extras.syntheticDataGenerator.generateTransactional": [[20, 1, 1, "", "generateTransactional"]], "PAMI.extras.syntheticDataGenerator.generateTransactional.generateTransactional": [[20, 2, 1, "", "generate"], [20, 2, 1, "", "save"]], "PAMI.extras.syntheticDataGenerator.generateUncertainTemporal": [[20, 1, 1, "", "generateUncertainTemporal"]], "PAMI.extras.syntheticDataGenerator.generateUncertainTemporal.generateUncertainTemporal": [[20, 2, 1, "", "generate"], [20, 2, 1, "", "save"]], 
"PAMI.extras.syntheticDataGenerator.generateUncertainTransactional": [[20, 1, 1, "", "generateUncertainTransactional"]], "PAMI.extras.syntheticDataGenerator.generateUncertainTransactional.generateUncertainTransactional": [[20, 2, 1, "", "generate"], [20, 2, 1, "", "save"]], "PAMI.extras.syntheticDataGenerator.generateUtilityTemporal": [[20, 1, 1, "", "generateUtilityTemporal"]], "PAMI.extras.syntheticDataGenerator.generateUtilityTemporal.generateUtilityTemporal": [[20, 2, 1, "", "generate"], [20, 2, 1, "", "save"]], "PAMI.extras.syntheticDataGenerator.generateUtilityTransactional": [[20, 1, 1, "", "generateUtilityTransactional"]], "PAMI.extras.syntheticDataGenerator.generateUtilityTransactional.generateUtilityTransactional": [[20, 2, 1, "", "generate"], [20, 2, 1, "", "save"]], "PAMI.extras.syntheticDataGenerator.syntheticUtilityDatabase": [[20, 1, 1, "", "syntheticUtilityDatabase"]], "PAMI.extras.syntheticDataGenerator.syntheticUtilityDatabase.syntheticUtilityDatabase": [[20, 2, 1, "", "__init__"], [20, 3, 1, "", "avgTransactionLength"], [20, 2, 1, "id0", "createRandomNumbers"], [20, 2, 1, "id10", "createSyntheticUtilityDatabase"], [20, 3, 1, "", "maxUtilRange"], [20, 3, 1, "", "numOfItems"], [20, 2, 1, "id11", "save"], [20, 3, 1, "", "totalTransactions"]], "PAMI.extras.syntheticDataGenerator.temporalDatabaseGen": [[20, 1, 1, "", "CreateSyntheticTemporal"]], "PAMI.extras.syntheticDataGenerator.temporalDatabaseGen.CreateSyntheticTemporal": [[20, 3, 1, "", "avg_transaction_length"], [20, 2, 1, "id12", "create_temporal_database"], [20, 2, 1, "", "generate_random_numbers"], [20, 3, 1, "", "num_of_items"], [20, 3, 1, "", "total_transactions"]], "PAMI.extras.syntheticDataGenerator.utilityDatabase": [[20, 1, 1, "", "UtilityDataGenerator"]], "PAMI.extras.syntheticDataGenerator.utilityDatabase.UtilityDataGenerator": [[20, 2, 1, "", "Generate"], [20, 2, 1, "", "GenerateAndPrintItemPairs"], [20, 2, 1, "", "GenerateExternalUtilityData"], [20, 2, 1, "", "GetExternalUtilityData"], [20, 2, 1, "", "GetInternalUtilityData"], [20, 2, 1, "", "GetUtilityData"], [20, 2, 1, "", "Save"], [20, 2, 1, "", "SaveItemsInternalUtilityValues"], [20, 2, 1, "", "Saveitemsexternalutilityvalues"]], "PAMI.extras.topKPatterns": [[7, 1, 1, "", "topKPatterns"]], "PAMI.extras.topKPatterns.topKPatterns": [[7, 2, 1, "", "getTopKPatterns"], [7, 2, 1, "", "save"]], "PAMI.extras.uncertaindb_convert": [[7, 1, 1, "", "predictedClass2Transaction"]], "PAMI.extras.uncertaindb_convert.predictedClass2Transaction": [[7, 2, 1, "", "getBinaryTransaction"]], "PAMI.extras.visualize": [[21, 0, 0, "-", "graphs"]], "PAMI.extras.visualize.graphs": [[21, 1, 1, "", "graphDatabase"]], "PAMI.extras.visualize.graphs.graphDatabase": [[21, 2, 1, "", "plot"]], "PAMI.faultTolerantFrequentPattern": [[23, 0, 0, "-", "basic"]], "PAMI.faultTolerantFrequentPattern.basic": [[23, 0, 0, "-", "FTApriori"], [23, 0, 0, "-", "FTFPGrowth"], [23, 0, 0, "-", "abstract"]], "PAMI.faultTolerantFrequentPattern.basic.FTApriori": [[23, 1, 1, "", "FTApriori"]], "PAMI.faultTolerantFrequentPattern.basic.FTApriori.FTApriori": [[23, 2, 1, "", "getMemoryRSS"], [23, 2, 1, "", "getMemoryUSS"], [23, 2, 1, "", "getPatterns"], [23, 2, 1, "", "getPatternsAsDataFrame"], [23, 2, 1, "", "getRuntime"], [23, 2, 1, "", "mine"], [23, 2, 1, "", "printResults"], [23, 2, 1, "", "save"], [23, 2, 1, "", "startMine"]], "PAMI.faultTolerantFrequentPattern.basic.FTFPGrowth": [[23, 1, 1, "", "FTFPGrowth"]], "PAMI.faultTolerantFrequentPattern.basic.FTFPGrowth.FTFPGrowth": [[23, 2, 1, "", "getMemoryRSS"], 
[23, 2, 1, "", "getMemoryUSS"], [23, 2, 1, "", "getPatterns"], [23, 2, 1, "", "getPatternsAsDataFrame"], [23, 2, 1, "", "getRuntime"], [23, 2, 1, "", "mine"], [23, 2, 1, "", "printResults"], [23, 2, 1, "", "save"], [23, 2, 1, "", "startMine"]], "PAMI.frequentPattern": [[25, 0, 0, "-", "basic"], [26, 0, 0, "-", "closed"], [27, 0, 0, "-", "cuda"], [28, 0, 0, "-", "maximal"], [29, 0, 0, "-", "pyspark"], [30, 0, 0, "-", "topk"]], "PAMI.frequentPattern.basic": [[25, 0, 0, "-", "Apriori"], [25, 0, 0, "-", "ECLAT"], [25, 0, 0, "-", "ECLATDiffset"], [25, 0, 0, "-", "ECLATbitset"], [25, 0, 0, "-", "FPGrowth"], [25, 0, 0, "-", "abstract"]], "PAMI.frequentPattern.basic.Apriori": [[25, 1, 1, "", "Apriori"]], "PAMI.frequentPattern.basic.Apriori.Apriori": [[25, 2, 1, "", "getMemoryRSS"], [25, 2, 1, "", "getMemoryUSS"], [25, 2, 1, "", "getPatterns"], [25, 2, 1, "", "getPatternsAsDataFrame"], [25, 2, 1, "", "getRuntime"], [25, 2, 1, "", "mine"], [25, 2, 1, "", "printResults"], [25, 2, 1, "", "save"], [25, 2, 1, "", "startMine"]], "PAMI.frequentPattern.basic.ECLAT": [[25, 1, 1, "", "ECLAT"]], "PAMI.frequentPattern.basic.ECLAT.ECLAT": [[25, 2, 1, "", "getMemoryRSS"], [25, 2, 1, "", "getMemoryUSS"], [25, 2, 1, "", "getPatterns"], [25, 2, 1, "", "getPatternsAsDataFrame"], [25, 2, 1, "", "getRuntime"], [25, 2, 1, "", "mine"], [25, 2, 1, "", "printResults"], [25, 2, 1, "", "save"], [25, 2, 1, "", "startMine"]], "PAMI.frequentPattern.basic.ECLATDiffset": [[25, 1, 1, "", "ECLATDiffset"]], "PAMI.frequentPattern.basic.ECLATDiffset.ECLATDiffset": [[25, 2, 1, "", "getMemoryRSS"], [25, 2, 1, "", "getMemoryUSS"], [25, 2, 1, "", "getPatterns"], [25, 2, 1, "", "getPatternsAsDataFrame"], [25, 2, 1, "", "getRuntime"], [25, 2, 1, "", "mine"], [25, 2, 1, "", "printResults"], [25, 2, 1, "", "save"], [25, 2, 1, "", "startMine"]], "PAMI.frequentPattern.basic.ECLATbitset": [[25, 1, 1, "", "ECLATbitset"]], "PAMI.frequentPattern.basic.ECLATbitset.ECLATbitset": [[25, 2, 1, "", "getMemoryRSS"], [25, 2, 1, "", "getMemoryUSS"], [25, 2, 1, "", "getPatterns"], [25, 2, 1, "", "getPatternsAsDataFrame"], [25, 2, 1, "", "getRuntime"], [25, 2, 1, "", "mine"], [25, 2, 1, "", "printResults"], [25, 2, 1, "", "save"], [25, 2, 1, "", "startMine"]], "PAMI.frequentPattern.basic.FPGrowth": [[25, 1, 1, "", "FPGrowth"]], "PAMI.frequentPattern.basic.FPGrowth.FPGrowth": [[25, 2, 1, "", "getMemoryRSS"], [25, 2, 1, "", "getMemoryUSS"], [25, 2, 1, "", "getPatterns"], [25, 2, 1, "", "getPatternsAsDataFrame"], [25, 2, 1, "", "getRuntime"], [25, 2, 1, "", "mine"], [25, 2, 1, "", "printResults"], [25, 2, 1, "", "save"], [25, 2, 1, "", "startMine"]], "PAMI.frequentPattern.closed": [[26, 0, 0, "-", "CHARM"], [26, 0, 0, "-", "abstract"]], "PAMI.frequentPattern.closed.CHARM": [[26, 1, 1, "", "CHARM"]], "PAMI.frequentPattern.closed.CHARM.CHARM": [[26, 2, 1, "", "getMemoryRSS"], [26, 2, 1, "", "getMemoryUSS"], [26, 2, 1, "", "getPatterns"], [26, 2, 1, "", "getPatternsAsDataFrame"], [26, 2, 1, "", "getRuntime"], [26, 2, 1, "", "mine"], [26, 2, 1, "", "printResults"], [26, 2, 1, "", "save"], [26, 2, 1, "", "startMine"]], "PAMI.frequentPattern.maximal": [[28, 0, 0, "-", "MaxFPGrowth"], [28, 0, 0, "-", "abstract"]], "PAMI.frequentPattern.maximal.MaxFPGrowth": [[28, 1, 1, "", "MaxFPGrowth"]], "PAMI.frequentPattern.maximal.MaxFPGrowth.MaxFPGrowth": [[28, 2, 1, "", "getMemoryRSS"], [28, 2, 1, "", "getMemoryUSS"], [28, 2, 1, "", "getPatterns"], [28, 2, 1, "", "getPatternsAsDataFrame"], [28, 2, 1, "", "getRuntime"], [28, 2, 1, "", "mine"], [28, 2, 1, "", "printResults"], 
[28, 2, 1, "", "save"], [28, 2, 1, "", "startMine"]], "PAMI.frequentPattern.topk": [[30, 0, 0, "-", "FAE"], [30, 0, 0, "-", "abstract"]], "PAMI.frequentPattern.topk.FAE": [[30, 1, 1, "", "FAE"]], "PAMI.frequentPattern.topk.FAE.FAE": [[30, 2, 1, "", "getMemoryRSS"], [30, 2, 1, "", "getMemoryUSS"], [30, 2, 1, "", "getPatterns"], [30, 2, 1, "", "getPatternsAsDataFrame"], [30, 2, 1, "", "getRuntime"], [30, 2, 1, "", "mine"], [30, 2, 1, "", "printTOPK"], [30, 2, 1, "", "save"], [30, 2, 1, "", "startMine"]], "PAMI.fuzzyCorrelatedPattern": [[32, 0, 0, "-", "basic"]], "PAMI.fuzzyCorrelatedPattern.basic": [[32, 0, 0, "-", "FCPGrowth"], [32, 0, 0, "-", "abstract"]], "PAMI.fuzzyCorrelatedPattern.basic.FCPGrowth": [[32, 1, 1, "", "Element"], [32, 1, 1, "", "FCPGrowth"], [32, 4, 1, "", "main"]], "PAMI.fuzzyCorrelatedPattern.basic.FCPGrowth.FCPGrowth": [[32, 2, 1, "", "getMemoryRSS"], [32, 2, 1, "", "getMemoryUSS"], [32, 2, 1, "", "getPatterns"], [32, 2, 1, "", "getPatternsAsDataFrame"], [32, 2, 1, "", "getRuntime"], [32, 2, 1, "", "mine"], [32, 2, 1, "", "printResults"], [32, 2, 1, "", "save"], [32, 2, 1, "", "startMine"]], "PAMI.fuzzyFrequentPattern": [[34, 0, 0, "-", "basic"]], "PAMI.fuzzyFrequentPattern.basic": [[34, 0, 0, "-", "FFIMiner"], [34, 0, 0, "-", "FFIMiner_old"], [34, 0, 0, "-", "abstract"]], "PAMI.fuzzyFrequentPattern.basic.FFIMiner": [[34, 1, 1, "", "FFIMiner"]], "PAMI.fuzzyFrequentPattern.basic.FFIMiner.FFIMiner": [[34, 2, 1, "", "getMemoryRSS"], [34, 2, 1, "", "getMemoryUSS"], [34, 2, 1, "", "getPatterns"], [34, 2, 1, "", "getPatternsAsDataFrame"], [34, 2, 1, "", "getRuntime"], [34, 2, 1, "", "mine"], [34, 2, 1, "", "printResults"], [34, 2, 1, "", "save"], [34, 2, 1, "", "startMine"]], "PAMI.fuzzyFrequentPattern.basic.FFIMiner_old": [[34, 1, 1, "", "FFIMiner"]], "PAMI.fuzzyFrequentPattern.basic.FFIMiner_old.FFIMiner": [[34, 2, 1, "", "getMemoryRSS"], [34, 2, 1, "", "getMemoryUSS"], [34, 2, 1, "", "getPatterns"], [34, 2, 1, "", "getPatternsAsDataFrame"], [34, 2, 1, "", "getRuntime"], [34, 2, 1, "", "mine"], [34, 2, 1, "", "printResults"], [34, 2, 1, "", "save"], [34, 2, 1, "", "startMine"]], "PAMI.fuzzyGeoreferencedFrequentPattern": [[36, 0, 0, "-", "basic"]], "PAMI.fuzzyGeoreferencedFrequentPattern.basic": [[36, 0, 0, "-", "FFSPMiner"], [36, 0, 0, "-", "FFSPMiner_old"], [36, 0, 0, "-", "abstract"]], "PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner": [[36, 1, 1, "", "FFSPMiner"]], "PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner.FFSPMiner": [[36, 2, 1, "", "getMemoryRSS"], [36, 2, 1, "", "getMemoryUSS"], [36, 2, 1, "", "getPatterns"], [36, 2, 1, "", "getPatternsAsDataFrame"], [36, 2, 1, "", "getRuntime"], [36, 2, 1, "", "mine"], [36, 2, 1, "", "printResults"], [36, 2, 1, "", "save"], [36, 2, 1, "", "startMine"]], "PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner_old": [[36, 1, 1, "", "FFSPMiner"]], "PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner_old.FFSPMiner": [[36, 2, 1, "", "getMemoryRSS"], [36, 2, 1, "", "getMemoryUSS"], [36, 2, 1, "", "getPatterns"], [36, 2, 1, "", "getPatternsAsDataFrame"], [36, 2, 1, "", "getRuntime"], [36, 2, 1, "", "mine"], [36, 2, 1, "", "printResults"], [36, 2, 1, "", "save"], [36, 2, 1, "", "startMine"]], "PAMI.fuzzyGeoreferencedPeriodicFrequentPattern": [[38, 0, 0, "-", "basic"]], "PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic": [[38, 0, 0, "-", "FGPFPMiner"], [38, 0, 0, "-", "FGPFPMiner_old"], [38, 0, 0, "-", "abstract"]], "PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner": [[38, 1, 1, "", "FGPFPMiner"]], 
"PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner.FGPFPMiner": [[38, 2, 1, "", "getMemoryRSS"], [38, 2, 1, "", "getMemoryUSS"], [38, 2, 1, "", "getPatterns"], [38, 2, 1, "", "getPatternsAsDataFrame"], [38, 2, 1, "", "getRuntime"], [38, 2, 1, "", "mine"], [38, 2, 1, "", "printResults"], [38, 2, 1, "", "save"], [38, 2, 1, "", "startMine"]], "PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner_old": [[38, 1, 1, "", "FGPFPMiner"]], "PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner_old.FGPFPMiner": [[38, 2, 1, "", "generateGraphs"], [38, 2, 1, "", "generateLatexCode"], [38, 2, 1, "", "getMemoryRSS"], [38, 2, 1, "", "getMemoryUSS"], [38, 2, 1, "", "getPatterns"], [38, 2, 1, "", "getPatternsAsDataFrame"], [38, 2, 1, "", "getPatternsAsDataframe"], [38, 2, 1, "", "getRuntime"], [38, 2, 1, "", "mine"], [38, 2, 1, "", "printResults"], [38, 2, 1, "", "save"], [38, 2, 1, "", "startMine"]], "PAMI.fuzzyPartialPeriodicPatterns": [[40, 0, 0, "-", "basic"]], "PAMI.fuzzyPartialPeriodicPatterns.basic": [[40, 0, 0, "-", "F3PMiner"], [40, 0, 0, "-", "abstract"]], "PAMI.fuzzyPartialPeriodicPatterns.basic.F3PMiner": [[40, 1, 1, "", "F3PMiner"]], "PAMI.fuzzyPartialPeriodicPatterns.basic.F3PMiner.F3PMiner": [[40, 2, 1, "", "getMemoryRSS"], [40, 2, 1, "", "getMemoryUSS"], [40, 2, 1, "", "getPatterns"], [40, 2, 1, "", "getPatternsAsDataFrame"], [40, 2, 1, "", "getRuntime"], [40, 2, 1, "", "mine"], [40, 2, 1, "", "printResults"], [40, 2, 1, "", "save"], [40, 2, 1, "", "startMine"]], "PAMI.fuzzyPeriodicFrequentPattern": [[42, 0, 0, "-", "basic"]], "PAMI.fuzzyPeriodicFrequentPattern.basic": [[42, 0, 0, "-", "FPFPMiner"], [42, 0, 0, "-", "FPFPMiner_old"], [42, 0, 0, "-", "abstract"]], "PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner": [[42, 1, 1, "", "FPFPMiner"]], "PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner.FPFPMiner": [[42, 2, 1, "", "getMemoryRSS"], [42, 2, 1, "", "getMemoryUSS"], [42, 2, 1, "", "getPatterns"], [42, 2, 1, "", "getPatternsAsDataFrame"], [42, 2, 1, "", "getRuntime"], [42, 2, 1, "", "mine"], [42, 2, 1, "", "printResults"], [42, 2, 1, "", "save"], [42, 2, 1, "", "startMine"]], "PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner_old": [[42, 1, 1, "", "FPFPMiner"]], "PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner_old.FPFPMiner": [[42, 2, 1, "", "getMemoryRSS"], [42, 2, 1, "", "getMemoryUSS"], [42, 2, 1, "", "getPatterns"], [42, 2, 1, "", "getPatternsAsDataFrame"], [42, 2, 1, "", "getRuntime"], [42, 2, 1, "", "mine"], [42, 2, 1, "", "printResults"], [42, 2, 1, "", "save"], [42, 2, 1, "", "startMine"]], "PAMI.geoReferencedPeriodicFrequentPattern": [[44, 0, 0, "-", "basic"]], "PAMI.geoReferencedPeriodicFrequentPattern.basic": [[44, 0, 0, "-", "GPFPMiner"], [44, 0, 0, "-", "abstract"]], "PAMI.geoReferencedPeriodicFrequentPattern.basic.GPFPMiner": [[44, 1, 1, "", "GPFPMiner"]], "PAMI.geoReferencedPeriodicFrequentPattern.basic.GPFPMiner.GPFPMiner": [[44, 2, 1, "", "getMemoryRSS"], [44, 2, 1, "", "getMemoryUSS"], [44, 2, 1, "", "getPatterns"], [44, 2, 1, "", "getPatternsAsDataFrame"], [44, 2, 1, "", "getRuntime"], [44, 2, 1, "", "mapNeighbours"], [44, 2, 1, "", "mine"], [44, 2, 1, "", "printResults"], [44, 2, 1, "", "save"], [44, 2, 1, "", "startMine"]], "PAMI.georeferencedFrequentPattern": [[46, 0, 0, "-", "basic"]], "PAMI.georeferencedFrequentPattern.basic": [[46, 0, 0, "-", "SpatialECLAT"], [46, 0, 0, "-", "abstract"]], "PAMI.georeferencedFrequentPattern.basic.SpatialECLAT": [[46, 1, 1, "", "SpatialECLAT"]], 
"PAMI.georeferencedFrequentPattern.basic.SpatialECLAT.SpatialECLAT": [[46, 2, 1, "", "getMemoryRSS"], [46, 2, 1, "", "getMemoryUSS"], [46, 2, 1, "", "getPatterns"], [46, 2, 1, "", "getPatternsAsDataFrame"], [46, 2, 1, "", "getRuntime"], [46, 2, 1, "", "mine"], [46, 2, 1, "", "printResults"], [46, 2, 1, "", "save"], [46, 2, 1, "", "startMine"]], "PAMI.georeferencedFrequentSequencePattern": [[47, 0, 0, "-", "abstract"]], "PAMI.georeferencedPartialPeriodicPattern": [[49, 0, 0, "-", "basic"]], "PAMI.georeferencedPartialPeriodicPattern.basic": [[49, 0, 0, "-", "STEclat"], [49, 0, 0, "-", "abstract"]], "PAMI.georeferencedPartialPeriodicPattern.basic.STEclat": [[49, 1, 1, "", "STEclat"]], "PAMI.georeferencedPartialPeriodicPattern.basic.STEclat.STEclat": [[49, 2, 1, "", "getMemoryRSS"], [49, 2, 1, "", "getMemoryUSS"], [49, 2, 1, "", "getPatterns"], [49, 2, 1, "", "getPatternsAsDataFrame"], [49, 2, 1, "", "getRuntime"], [49, 2, 1, "", "mapNeighbours"], [49, 2, 1, "", "mine"], [49, 2, 1, "", "printResults"], [49, 2, 1, "", "save"], [49, 2, 1, "", "startMine"]], "PAMI.highUtilityFrequentPattern": [[51, 0, 0, "-", "basic"]], "PAMI.highUtilityFrequentPattern.basic": [[51, 0, 0, "-", "HUFIM"], [51, 0, 0, "-", "abstract"]], "PAMI.highUtilityFrequentPattern.basic.HUFIM": [[51, 1, 1, "", "HUFIM"]], "PAMI.highUtilityFrequentPattern.basic.HUFIM.HUFIM": [[51, 2, 1, "", "getMemoryRSS"], [51, 2, 1, "", "getMemoryUSS"], [51, 2, 1, "", "getPatterns"], [51, 2, 1, "", "getPatternsAsDataFrame"], [51, 2, 1, "", "getRuntime"], [51, 2, 1, "", "mine"], [51, 2, 1, "", "printResults"], [51, 2, 1, "", "save"], [51, 2, 1, "", "startMine"]], "PAMI.highUtilityGeoreferencedFrequentPattern": [[53, 0, 0, "-", "basic"]], "PAMI.highUtilityGeoreferencedFrequentPattern.basic": [[53, 0, 0, "-", "SHUFIM"], [53, 0, 0, "-", "abstract"]], "PAMI.highUtilityGeoreferencedFrequentPattern.basic.SHUFIM": [[53, 1, 1, "", "SHUFIM"], [53, 4, 1, "", "main"]], "PAMI.highUtilityGeoreferencedFrequentPattern.basic.SHUFIM.SHUFIM": [[53, 2, 1, "", "getMemoryRSS"], [53, 2, 1, "", "getMemoryUSS"], [53, 2, 1, "", "getPatterns"], [53, 2, 1, "", "getPatternsAsDataFrame"], [53, 2, 1, "", "getRuntime"], [53, 2, 1, "", "mine"], [53, 2, 1, "", "printResults"], [53, 2, 1, "", "save"], [53, 2, 1, "", "startMine"]], "PAMI.highUtilityPattern": [[55, 0, 0, "-", "basic"], [56, 0, 0, "-", "parallel"]], "PAMI.highUtilityPattern.basic": [[55, 0, 0, "-", "EFIM"], [55, 0, 0, "-", "HMiner"], [55, 0, 0, "-", "UPGrowth"], [55, 0, 0, "-", "abstract"]], "PAMI.highUtilityPattern.basic.EFIM": [[55, 1, 1, "", "EFIM"]], "PAMI.highUtilityPattern.basic.EFIM.EFIM": [[55, 2, 1, "", "getMemoryRSS"], [55, 2, 1, "", "getMemoryUSS"], [55, 2, 1, "", "getPatterns"], [55, 2, 1, "", "getPatternsAsDataFrame"], [55, 2, 1, "", "getRuntime"], [55, 2, 1, "", "mine"], [55, 2, 1, "", "printResults"], [55, 2, 1, "", "save"], [55, 2, 1, "", "sort_transaction"], [55, 2, 1, "", "startMine"]], "PAMI.highUtilityPattern.basic.HMiner": [[55, 1, 1, "", "HMiner"]], "PAMI.highUtilityPattern.basic.HMiner.HMiner": [[55, 2, 1, "", "getMemoryRSS"], [55, 2, 1, "", "getMemoryUSS"], [55, 2, 1, "", "getPatterns"], [55, 2, 1, "", "getPatternsAsDataFrame"], [55, 2, 1, "", "getRuntime"], [55, 2, 1, "", "mine"], [55, 2, 1, "", "printResults"], [55, 2, 1, "", "save"], [55, 2, 1, "", "startMine"]], "PAMI.highUtilityPattern.basic.UPGrowth": [[55, 1, 1, "", "UPGrowth"]], "PAMI.highUtilityPattern.basic.UPGrowth.UPGrowth": [[55, 2, 1, "", "PrintStats"], [55, 2, 1, "", "getMemoryRSS"], [55, 2, 1, "", "getMemoryUSS"], [55, 2, 1, 
"", "getPatterns"], [55, 2, 1, "", "getPatternsAsDataFrame"], [55, 2, 1, "", "getRuntime"], [55, 2, 1, "", "mine"], [55, 2, 1, "", "printResults"], [55, 2, 1, "", "save"], [55, 2, 1, "", "startMine"]], "PAMI.highUtilityPattern.parallel": [[56, 0, 0, "-", "abstract"]], "PAMI.highUtilityPatternsInStreams": [[57, 0, 0, "-", "abstract"]], "PAMI.highUtilitySpatialPattern": [[58, 0, 0, "-", "abstract"], [59, 0, 0, "-", "basic"], [60, 0, 0, "-", "topk"]], "PAMI.highUtilitySpatialPattern.abstract": [[58, 1, 1, "", "utilityPatterns"]], "PAMI.highUtilitySpatialPattern.abstract.utilityPatterns": [[58, 2, 1, "", "endTime"], [58, 2, 1, "", "finalPatterns"], [58, 2, 1, "", "getMemoryRSS"], [58, 2, 1, "", "getMemoryUSS"], [58, 2, 1, "", "getPatterns"], [58, 2, 1, "", "getPatternsAsDataFrame"], [58, 2, 1, "", "getRuntime"], [58, 2, 1, "", "iFile"], [58, 2, 1, "", "memoryRSS"], [58, 2, 1, "", "memoryUSS"], [58, 2, 1, "", "minUtil"], [58, 2, 1, "", "nFile"], [58, 2, 1, "", "oFile"], [58, 2, 1, "", "save"], [58, 2, 1, "", "startMine"], [58, 2, 1, "", "startTime"]], "PAMI.highUtilitySpatialPattern.basic": [[59, 0, 0, "-", "HDSHUIM"], [59, 0, 0, "-", "SHUIM"], [59, 0, 0, "-", "abstract"]], "PAMI.highUtilitySpatialPattern.basic.HDSHUIM": [[59, 1, 1, "", "HDSHUIM"]], "PAMI.highUtilitySpatialPattern.basic.HDSHUIM.HDSHUIM": [[59, 2, 1, "", "getMemoryRSS"], [59, 2, 1, "", "getMemoryUSS"], [59, 2, 1, "", "getPatterns"], [59, 2, 1, "", "getPatternsAsDataFrame"], [59, 2, 1, "", "getRuntime"], [59, 2, 1, "", "mine"], [59, 2, 1, "", "printResults"], [59, 2, 1, "", "save"], [59, 2, 1, "", "startMine"]], "PAMI.highUtilitySpatialPattern.basic.SHUIM": [[59, 1, 1, "", "SHUIM"]], "PAMI.highUtilitySpatialPattern.basic.SHUIM.SHUIM": [[59, 2, 1, "", "getMemoryRSS"], [59, 2, 1, "", "getMemoryUSS"], [59, 2, 1, "", "getPatterns"], [59, 2, 1, "", "getPatternsAsDataFrame"], [59, 2, 1, "", "getRuntime"], [59, 2, 1, "", "mine"], [59, 2, 1, "", "printResults"], [59, 2, 1, "", "save"], [59, 2, 1, "", "startMine"]], "PAMI.highUtilitySpatialPattern.topk": [[60, 0, 0, "-", "TKSHUIM"], [60, 0, 0, "-", "abstract"]], "PAMI.highUtilitySpatialPattern.topk.TKSHUIM": [[60, 1, 1, "", "Dataset"], [60, 1, 1, "", "TKSHUIM"], [60, 1, 1, "", "Transaction"], [60, 4, 1, "", "main"]], "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.Dataset": [[60, 2, 1, "", "createTransaction"], [60, 2, 1, "", "getMaxItem"], [60, 2, 1, "", "getTransactions"], [60, 3, 1, "", "maxItem"], [60, 3, 1, "", "transactions"]], "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM": [[60, 3, 1, "", "Neighbours"], [60, 2, 1, "", "additemset"], [60, 2, 1, "", "backtrackingEFIM"], [60, 2, 1, "", "calculateNeighbourIntersection"], [60, 3, 1, "", "candidateCount"], [60, 3, 1, "", "endTime"], [60, 3, 1, "", "finalPatterns"], [60, 2, 1, "", "getMemoryRSS"], [60, 2, 1, "", "getMemoryUSS"], [60, 2, 1, "", "getPatterns"], [60, 2, 1, "", "getPatternsAsDataFrame"], [60, 2, 1, "", "getRuntime"], [60, 3, 1, "", "heapList"], [60, 3, 1, "", "iFile"], [60, 3, 1, "", "intTostr"], [60, 2, 1, "", "intersection"], [60, 2, 1, "", "is_equal"], [60, 3, 1, "", "maxMemory"], [60, 3, 1, "", "memoryRSS"], [60, 3, 1, "", "memoryUSS"], [60, 3, 1, "", "minUtil"], [60, 2, 1, "", "mine"], [60, 3, 1, "", "nFile"], [60, 3, 1, "", "newNamesToOldNames"], [60, 3, 1, "", "oFile"], [60, 3, 1, "", "oldNamesToNewNames"], [60, 2, 1, "", "output"], [60, 2, 1, "", "printResults"], [60, 2, 1, "", "save"], [60, 3, 1, "", "sep"], [60, 2, 1, "", "sortDatabase"], [60, 2, 1, "", "sort_transaction"], [60, 2, 1, "", "startMine"], [60, 3, 
1, "", "startTime"], [60, 3, 1, "", "strToint"], [60, 3, 1, "", "temp"], [60, 2, 1, "", "useUtilityBinArrayToCalculateLocalUtilityFirstTime"], [60, 2, 1, "", "useUtilityBinArrayToCalculateSubtreeUtilityFirstTime"], [60, 2, 1, "", "useUtilityBinArraysToCalculateUpperBounds"], [60, 3, 1, "", "utilityBinArrayLU"], [60, 3, 1, "", "utilityBinArraySU"]], "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.Transaction": [[60, 2, 1, "", "getItems"], [60, 2, 1, "", "getLastPosition"], [60, 2, 1, "", "getPmus"], [60, 2, 1, "", "getUtilities"], [60, 2, 1, "", "insertionSort"], [60, 3, 1, "", "offset"], [60, 3, 1, "", "prefixUtility"], [60, 2, 1, "", "projectTransaction"], [60, 2, 1, "", "removeUnpromisingItems"]], "PAMI.highUtilitySpatialPattern.topk.abstract": [[60, 1, 1, "", "utilityPatterns"]], "PAMI.highUtilitySpatialPattern.topk.abstract.utilityPatterns": [[60, 2, 1, "", "endTime"], [60, 2, 1, "", "finalPatterns"], [60, 2, 1, "", "getMemoryRSS"], [60, 2, 1, "", "getMemoryUSS"], [60, 2, 1, "", "getPatterns"], [60, 2, 1, "", "getPatternsAsDataFrame"], [60, 2, 1, "", "getRuntime"], [60, 2, 1, "", "iFile"], [60, 2, 1, "", "memoryRSS"], [60, 2, 1, "", "memoryUSS"], [60, 2, 1, "", "nFile"], [60, 2, 1, "", "oFile"], [60, 2, 1, "", "printResults"], [60, 2, 1, "", "save"], [60, 2, 1, "", "startMine"], [60, 2, 1, "", "startTime"]], "PAMI.localPeriodicPattern": [[62, 0, 0, "-", "basic"]], "PAMI.localPeriodicPattern.basic": [[62, 0, 0, "-", "LPPGrowth"], [62, 0, 0, "-", "LPPMBreadth"], [62, 0, 0, "-", "LPPMDepth"], [62, 0, 0, "-", "abstract"]], "PAMI.localPeriodicPattern.basic.LPPGrowth": [[62, 1, 1, "", "LPPGrowth"], [62, 1, 1, "", "Node"], [62, 1, 1, "", "Tree"]], "PAMI.localPeriodicPattern.basic.LPPGrowth.LPPGrowth": [[62, 2, 1, "", "getMemoryRSS"], [62, 2, 1, "", "getMemoryUSS"], [62, 2, 1, "", "getPatterns"], [62, 2, 1, "", "getPatternsAsDataFrame"], [62, 2, 1, "", "getRuntime"], [62, 2, 1, "", "mine"], [62, 2, 1, "", "printResults"], [62, 2, 1, "", "save"], [62, 2, 1, "", "startMine"]], "PAMI.localPeriodicPattern.basic.LPPGrowth.Node": [[62, 2, 1, "", "getChild"]], "PAMI.localPeriodicPattern.basic.LPPGrowth.Tree": [[62, 2, 1, "", "addTransaction"], [62, 2, 1, "", "createPrefixTree"], [62, 2, 1, "", "deleteNode"], [62, 2, 1, "", "fixNodeLinks"]], "PAMI.localPeriodicPattern.basic.LPPMBreadth": [[62, 1, 1, "", "LPPMBreadth"]], "PAMI.localPeriodicPattern.basic.LPPMBreadth.LPPMBreadth": [[62, 2, 1, "", "getMemoryRSS"], [62, 2, 1, "", "getMemoryUSS"], [62, 2, 1, "", "getPatterns"], [62, 2, 1, "", "getPatternsAsDataFrame"], [62, 2, 1, "", "getRuntime"], [62, 2, 1, "", "mine"], [62, 2, 1, "", "printResults"], [62, 2, 1, "", "save"], [62, 2, 1, "", "startMine"]], "PAMI.localPeriodicPattern.basic.LPPMDepth": [[62, 1, 1, "", "LPPMDepth"]], "PAMI.localPeriodicPattern.basic.LPPMDepth.LPPMDepth": [[62, 2, 1, "", "getMemoryRSS"], [62, 2, 1, "", "getMemoryUSS"], [62, 2, 1, "", "getPatterns"], [62, 2, 1, "", "getPatternsAsDataFrame"], [62, 2, 1, "", "getRuntime"], [62, 2, 1, "", "mine"], [62, 2, 1, "", "printResults"], [62, 2, 1, "", "save"], [62, 2, 1, "", "startMine"]], "PAMI.multipleMinimumSupportBasedFrequentPattern": [[64, 0, 0, "-", "basic"]], "PAMI.multipleMinimumSupportBasedFrequentPattern.basic": [[64, 0, 0, "-", "CFPGrowth"], [64, 0, 0, "-", "CFPGrowthPlus"], [64, 0, 0, "-", "abstract"]], "PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowth": [[64, 1, 1, "", "CFPGrowth"]], "PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowth.CFPGrowth": [[64, 2, 1, "", "Mine"], [64, 2, 1, "", 
"getMemoryRSS"], [64, 2, 1, "", "getMemoryUSS"], [64, 2, 1, "", "getPatterns"], [64, 2, 1, "", "getPatternsAsDataFrame"], [64, 2, 1, "", "getRuntime"], [64, 2, 1, "", "printResults"], [64, 2, 1, "", "save"], [64, 2, 1, "", "startMine"]], "PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowthPlus": [[64, 1, 1, "", "CFPGrowthPlus"]], "PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowthPlus.CFPGrowthPlus": [[64, 2, 1, "", "Mine"], [64, 2, 1, "", "getMemoryRSS"], [64, 2, 1, "", "getMemoryUSS"], [64, 2, 1, "", "getPatterns"], [64, 2, 1, "", "getPatternsAsDataFrame"], [64, 2, 1, "", "getRuntime"], [64, 2, 1, "", "printResults"], [64, 2, 1, "", "save"], [64, 2, 1, "", "startMine"]], "PAMI.partialPeriodicFrequentPattern": [[66, 0, 0, "-", "basic"]], "PAMI.partialPeriodicFrequentPattern.basic": [[66, 0, 0, "-", "abstract"]], "PAMI.partialPeriodicFrequentPattern.basic.abstract": [[66, 1, 1, "", "partialPeriodicPatterns"]], "PAMI.partialPeriodicFrequentPattern.basic.abstract.partialPeriodicPatterns": [[66, 2, 1, "", "getMemoryRSS"], [66, 2, 1, "", "getMemoryUSS"], [66, 2, 1, "", "getPatterns"], [66, 2, 1, "", "getPatternsAsDataFrame"], [66, 2, 1, "", "getRuntime"], [66, 2, 1, "", "printResults"], [66, 2, 1, "", "save"], [66, 2, 1, "", "startMine"]], "PAMI.partialPeriodicPattern": [[68, 0, 0, "-", "basic"], [69, 0, 0, "-", "closed"], [70, 0, 0, "-", "maximal"], [71, 0, 0, "-", "pyspark"], [72, 0, 0, "-", "topk"]], "PAMI.partialPeriodicPattern.basic": [[68, 0, 0, "-", "Gabstract"], [68, 0, 0, "-", "PPPGrowth"], [68, 0, 0, "-", "PPP_ECLAT"], [68, 0, 0, "-", "abstract"]], "PAMI.partialPeriodicPattern.basic.PPPGrowth": [[68, 1, 1, "", "PPPGrowth"]], "PAMI.partialPeriodicPattern.basic.PPPGrowth.PPPGrowth": [[68, 2, 1, "", "getMemoryRSS"], [68, 2, 1, "", "getMemoryUSS"], [68, 2, 1, "", "getPatterns"], [68, 2, 1, "", "getPatternsAsDataFrame"], [68, 2, 1, "", "getRuntime"], [68, 2, 1, "", "mine"], [68, 2, 1, "", "printResults"], [68, 2, 1, "", "save"], [68, 2, 1, "", "startMine"]], "PAMI.partialPeriodicPattern.basic.PPP_ECLAT": [[68, 1, 1, "", "PPP_ECLAT"]], "PAMI.partialPeriodicPattern.basic.PPP_ECLAT.PPP_ECLAT": [[68, 2, 1, "", "Mine"], [68, 2, 1, "", "getMemoryRSS"], [68, 2, 1, "", "getMemoryUSS"], [68, 2, 1, "", "getPatterns"], [68, 2, 1, "", "getPatternsAsDataFrame"], [68, 2, 1, "", "getRuntime"], [68, 2, 1, "", "printResults"], [68, 2, 1, "", "save"], [68, 2, 1, "", "startMine"]], "PAMI.partialPeriodicPattern.closed": [[69, 0, 0, "-", "PPPClose"], [69, 0, 0, "-", "abstract"]], "PAMI.partialPeriodicPattern.closed.PPPClose": [[69, 1, 1, "", "PPPClose"]], "PAMI.partialPeriodicPattern.closed.PPPClose.PPPClose": [[69, 2, 1, "", "getMemoryRSS"], [69, 2, 1, "", "getMemoryUSS"], [69, 2, 1, "", "getPatterns"], [69, 2, 1, "", "getPatternsAsDataFrame"], [69, 2, 1, "", "getRuntime"], [69, 2, 1, "", "mine"], [69, 2, 1, "", "printResults"], [69, 2, 1, "", "save"], [69, 2, 1, "", "startMine"]], "PAMI.partialPeriodicPattern.maximal": [[70, 0, 0, "-", "abstract"]], "PAMI.partialPeriodicPattern.pyspark": [[71, 0, 0, "-", "abstract"]], "PAMI.partialPeriodicPattern.topk": [[72, 0, 0, "-", "abstract"], [72, 0, 0, "-", "k3PMiner"]], "PAMI.partialPeriodicPattern.topk.abstract": [[72, 1, 1, "", "partialPeriodicPatterns"]], "PAMI.partialPeriodicPattern.topk.abstract.partialPeriodicPatterns": [[72, 2, 1, "", "getMemoryRSS"], [72, 2, 1, "", "getMemoryUSS"], [72, 2, 1, "", "getPatterns"], [72, 2, 1, "", "getPatternsAsDataFrame"], [72, 2, 1, "", "getRuntime"], [72, 2, 1, "", "printResults"], [72, 2, 1, "", 
"save"], [72, 2, 1, "", "startMine"]], "PAMI.partialPeriodicPattern.topk.k3PMiner": [[72, 1, 1, "", "k3PMiner"]], "PAMI.partialPeriodicPattern.topk.k3PMiner.k3PMiner": [[72, 2, 1, "", "getMemoryRSS"], [72, 2, 1, "", "getMemoryUSS"], [72, 2, 1, "", "getPatterns"], [72, 2, 1, "", "getPatternsAsDataFrame"], [72, 2, 1, "", "getRuntime"], [72, 2, 1, "", "mine"], [72, 2, 1, "", "printResults"], [72, 2, 1, "", "save"], [72, 2, 1, "", "startMine"]], "PAMI.partialPeriodicPatternInMultipleTimeSeries": [[73, 0, 0, "-", "PPGrowth"], [73, 0, 0, "-", "abstract"]], "PAMI.partialPeriodicPatternInMultipleTimeSeries.PPGrowth": [[73, 1, 1, "", "PPGrowth"]], "PAMI.partialPeriodicPatternInMultipleTimeSeries.PPGrowth.PPGrowth": [[73, 2, 1, "", "Mine"], [73, 2, 1, "", "getMemoryRSS"], [73, 2, 1, "", "getMemoryUSS"], [73, 2, 1, "", "getPatterns"], [73, 2, 1, "", "getPatternsAsDataFrame"], [73, 2, 1, "", "getRuntime"], [73, 2, 1, "", "printResults"], [73, 2, 1, "", "save"], [73, 2, 1, "", "startMine"]], "PAMI.periodicCorrelatedPattern": [[75, 0, 0, "-", "basic"]], "PAMI.periodicCorrelatedPattern.basic": [[75, 0, 0, "-", "EPCPGrowth"], [75, 0, 0, "-", "abstract"]], "PAMI.periodicCorrelatedPattern.basic.EPCPGrowth": [[75, 1, 1, "", "EPCPGrowth"]], "PAMI.periodicCorrelatedPattern.basic.EPCPGrowth.EPCPGrowth": [[75, 2, 1, "", "getMemoryRSS"], [75, 2, 1, "", "getMemoryUSS"], [75, 2, 1, "", "getPatterns"], [75, 2, 1, "", "getPatternsAsDataFrame"], [75, 2, 1, "", "getRuntime"], [75, 2, 1, "", "printResults"], [75, 2, 1, "", "save"], [75, 2, 1, "", "startMine"]], "PAMI.periodicFrequentPattern": [[77, 0, 0, "-", "basic"], [78, 0, 0, "-", "closed"], [79, 0, 0, "-", "cuda"], [80, 0, 0, "-", "maximal"], [81, 0, 0, "-", "pyspark"], [82, 0, 0, "-", "topk"]], "PAMI.periodicFrequentPattern.basic": [[77, 0, 0, "-", "PFECLAT"], [77, 0, 0, "-", "PFPGrowth"], [77, 0, 0, "-", "PFPGrowthPlus"], [77, 0, 0, "-", "PFPMC"], [77, 0, 0, "-", "PSGrowth"], [77, 0, 0, "-", "abstract"]], "PAMI.periodicFrequentPattern.basic.PFECLAT": [[77, 1, 1, "", "PFECLAT"]], "PAMI.periodicFrequentPattern.basic.PFECLAT.PFECLAT": [[77, 2, 1, "", "Mine"], [77, 2, 1, "", "getMemoryRSS"], [77, 2, 1, "", "getMemoryUSS"], [77, 2, 1, "", "getPatterns"], [77, 2, 1, "", "getPatternsAsDataFrame"], [77, 2, 1, "", "getRuntime"], [77, 2, 1, "", "printResults"], [77, 2, 1, "", "save"], [77, 2, 1, "", "startMine"]], "PAMI.periodicFrequentPattern.basic.PFPGrowth": [[77, 1, 1, "", "PFPGrowth"]], "PAMI.periodicFrequentPattern.basic.PFPGrowth.PFPGrowth": [[77, 2, 1, "", "Mine"], [77, 2, 1, "", "getMemoryRSS"], [77, 2, 1, "", "getMemoryUSS"], [77, 2, 1, "", "getPatterns"], [77, 2, 1, "", "getPatternsAsDataFrame"], [77, 2, 1, "", "getRuntime"], [77, 2, 1, "", "printResults"], [77, 2, 1, "", "save"], [77, 2, 1, "", "startMine"]], "PAMI.periodicFrequentPattern.basic.PFPGrowthPlus": [[77, 1, 1, "", "PFPGrowthPlus"]], "PAMI.periodicFrequentPattern.basic.PFPGrowthPlus.PFPGrowthPlus": [[77, 2, 1, "", "getMemoryRSS"], [77, 2, 1, "", "getMemoryUSS"], [77, 2, 1, "", "getPatterns"], [77, 2, 1, "", "getPatternsAsDataFrame"], [77, 2, 1, "", "getRuntime"], [77, 2, 1, "", "printResults"], [77, 2, 1, "", "save"], [77, 2, 1, "", "startMine"]], "PAMI.periodicFrequentPattern.basic.PFPMC": [[77, 1, 1, "", "PFPMC"]], "PAMI.periodicFrequentPattern.basic.PFPMC.PFPMC": [[77, 2, 1, "", "getMemoryRSS"], [77, 2, 1, "", "getMemoryUSS"], [77, 2, 1, "", "getPatterns"], [77, 2, 1, "", "getPatternsAsDataFrame"], [77, 2, 1, "", "getRuntime"], [77, 2, 1, "", "printResults"], [77, 2, 1, "", "save"], [77, 2, 1, "", 
"startMine"]], "PAMI.periodicFrequentPattern.basic.PSGrowth": [[77, 1, 1, "", "Node"], [77, 1, 1, "", "PSGrowth"], [77, 4, 1, "", "conditionalTransactions"], [77, 4, 1, "", "getPeriodAndSupport"]], "PAMI.periodicFrequentPattern.basic.PSGrowth.Node": [[77, 2, 1, "", "addChild"]], "PAMI.periodicFrequentPattern.basic.PSGrowth.PSGrowth": [[77, 2, 1, "", "Mine"], [77, 2, 1, "", "getMemoryRSS"], [77, 2, 1, "", "getMemoryUSS"], [77, 2, 1, "", "getPatterns"], [77, 2, 1, "", "getPatternsAsDataFrame"], [77, 2, 1, "", "getRuntime"], [77, 2, 1, "", "printResults"], [77, 2, 1, "", "save"], [77, 2, 1, "", "startMine"]], "PAMI.periodicFrequentPattern.closed": [[78, 0, 0, "-", "CPFPMiner"], [78, 0, 0, "-", "abstract"]], "PAMI.periodicFrequentPattern.closed.CPFPMiner": [[78, 1, 1, "", "CPFPMiner"]], "PAMI.periodicFrequentPattern.closed.CPFPMiner.CPFPMiner": [[78, 2, 1, "", "Mine"], [78, 2, 1, "", "getMemoryRSS"], [78, 2, 1, "", "getMemoryUSS"], [78, 2, 1, "", "getPatterns"], [78, 2, 1, "", "getPatternsAsDataFrame"], [78, 2, 1, "", "getRuntime"], [78, 2, 1, "", "printResults"], [78, 2, 1, "", "save"], [78, 2, 1, "", "startMine"]], "PAMI.periodicFrequentPattern.maximal": [[80, 0, 0, "-", "MaxPFGrowth"], [80, 0, 0, "-", "abstract"]], "PAMI.periodicFrequentPattern.maximal.MaxPFGrowth": [[80, 1, 1, "", "MaxPFGrowth"]], "PAMI.periodicFrequentPattern.maximal.MaxPFGrowth.MaxPFGrowth": [[80, 2, 1, "", "Mine"], [80, 2, 1, "", "getMemoryRSS"], [80, 2, 1, "", "getMemoryUSS"], [80, 2, 1, "", "getPatterns"], [80, 2, 1, "", "getPatternsAsDataFrame"], [80, 2, 1, "", "getRuntime"], [80, 2, 1, "", "printResults"], [80, 2, 1, "", "save"], [80, 2, 1, "", "startMine"]], "PAMI.periodicFrequentPattern.topk": [[83, 0, 0, "-", "TopkPFP"], [84, 0, 0, "-", "kPFPMiner"]], "PAMI.periodicFrequentPattern.topk.TopkPFP": [[83, 0, 0, "-", "TopkPFP"], [83, 0, 0, "-", "abstract"]], "PAMI.periodicFrequentPattern.topk.TopkPFP.TopkPFP": [[83, 1, 1, "", "TopkPFPGrowth"]], "PAMI.periodicFrequentPattern.topk.TopkPFP.TopkPFP.TopkPFPGrowth": [[83, 2, 1, "", "Mine"], [83, 2, 1, "", "getMemoryRSS"], [83, 2, 1, "", "getMemoryUSS"], [83, 2, 1, "", "getPatterns"], [83, 2, 1, "", "getPatternsAsDataFrame"], [83, 2, 1, "", "getRuntime"], [83, 2, 1, "", "printResults"], [83, 2, 1, "", "save"], [83, 2, 1, "", "startMine"]], "PAMI.periodicFrequentPattern.topk.kPFPMiner": [[84, 0, 0, "-", "abstract"], [84, 0, 0, "-", "kPFPMiner"]], "PAMI.periodicFrequentPattern.topk.kPFPMiner.kPFPMiner": [[84, 1, 1, "", "kPFPMiner"]], "PAMI.periodicFrequentPattern.topk.kPFPMiner.kPFPMiner.kPFPMiner": [[84, 2, 1, "", "getMemoryRSS"], [84, 2, 1, "", "getMemoryUSS"], [84, 2, 1, "", "getPatterns"], [84, 2, 1, "", "getPatternsAsDataFrame"], [84, 2, 1, "", "getPer_Sup"], [84, 2, 1, "", "getRuntime"], [84, 3, 1, "", "lno"], [84, 2, 1, "", "printResults"], [84, 2, 1, "", "save"], [84, 2, 1, "", "startMine"]], "PAMI.recurringPattern": [[86, 0, 0, "-", "basic"]], "PAMI.recurringPattern.basic": [[86, 0, 0, "-", "RPGrowth"], [86, 0, 0, "-", "abstract"]], "PAMI.recurringPattern.basic.RPGrowth": [[86, 1, 1, "", "RPGrowth"]], "PAMI.recurringPattern.basic.RPGrowth.RPGrowth": [[86, 2, 1, "", "Mine"], [86, 2, 1, "", "getMemoryRSS"], [86, 2, 1, "", "getMemoryUSS"], [86, 2, 1, "", "getPatterns"], [86, 2, 1, "", "getPatternsAsDataFrame"], [86, 2, 1, "", "getRuntime"], [86, 2, 1, "", "printResults"], [86, 2, 1, "", "save"], [86, 2, 1, "", "startMine"]], "PAMI.relativeFrequentPattern": [[88, 0, 0, "-", "basic"]], "PAMI.relativeFrequentPattern.basic": [[88, 0, 0, "-", "RSFPGrowth"], [88, 0, 0, "-", 
"abstract"]], "PAMI.relativeFrequentPattern.basic.RSFPGrowth": [[88, 1, 1, "", "RSFPGrowth"]], "PAMI.relativeFrequentPattern.basic.RSFPGrowth.RSFPGrowth": [[88, 2, 1, "", "Mine"], [88, 2, 1, "", "getMemoryRSS"], [88, 2, 1, "", "getMemoryUSS"], [88, 2, 1, "", "getPatterns"], [88, 2, 1, "", "getPatternsAsDataFrame"], [88, 2, 1, "", "getRuntime"], [88, 2, 1, "", "printResults"], [88, 2, 1, "", "save"], [88, 2, 1, "", "startMine"]], "PAMI.relativeHighUtilityPattern": [[90, 0, 0, "-", "basic"]], "PAMI.relativeHighUtilityPattern.basic": [[90, 0, 0, "-", "RHUIM"], [90, 0, 0, "-", "abstract"]], "PAMI.relativeHighUtilityPattern.basic.RHUIM": [[90, 1, 1, "", "RHUIM"]], "PAMI.relativeHighUtilityPattern.basic.RHUIM.RHUIM": [[90, 2, 1, "", "getMemoryRSS"], [90, 2, 1, "", "getMemoryUSS"], [90, 2, 1, "", "getPatterns"], [90, 2, 1, "", "getPatternsAsDataFrame"], [90, 2, 1, "", "getRuntime"], [90, 2, 1, "", "printResults"], [90, 2, 1, "", "save"], [90, 2, 1, "", "sortDatabase"], [90, 2, 1, "", "sort_transaction"], [90, 2, 1, "", "startMine"]], "PAMI.sequentialPatternMining": [[93, 0, 0, "-", "basic"], [94, 0, 0, "-", "closed"]], "PAMI.sequentialPatternMining.basic": [[93, 0, 0, "-", "SPADE"], [93, 0, 0, "-", "SPAM"], [93, 0, 0, "-", "abstract"], [93, 0, 0, "-", "prefixSpan"]], "PAMI.sequentialPatternMining.basic.SPADE": [[93, 1, 1, "", "SPADE"]], "PAMI.sequentialPatternMining.basic.SPADE.SPADE": [[93, 2, 1, "", "Mine"], [93, 2, 1, "", "getMemoryRSS"], [93, 2, 1, "", "getMemoryUSS"], [93, 2, 1, "", "getPatterns"], [93, 2, 1, "", "getPatternsAsDataFrame"], [93, 2, 1, "", "getRuntime"], [93, 2, 1, "", "make1LenDatabase"], [93, 2, 1, "", "make2LenDatabase"], [93, 2, 1, "", "make3LenDatabase"], [93, 2, 1, "", "makeNextRow"], [93, 2, 1, "", "makeNextRowSame"], [93, 2, 1, "", "makeNextRowSame2"], [93, 2, 1, "", "makeNextRowSame3"], [93, 2, 1, "", "makexLenDatabase"], [93, 2, 1, "", "makexLenDatabaseSame"], [93, 2, 1, "", "printResults"], [93, 2, 1, "", "save"], [93, 2, 1, "", "startMine"]], "PAMI.sequentialPatternMining.basic.SPAM": [[93, 1, 1, "", "SPAM"]], "PAMI.sequentialPatternMining.basic.SPAM.SPAM": [[93, 2, 1, "", "DfsPruning"], [93, 2, 1, "", "Sstep"], [93, 2, 1, "", "countSup"], [93, 2, 1, "", "getMemoryRSS"], [93, 2, 1, "", "getMemoryUSS"], [93, 2, 1, "", "getPatterns"], [93, 2, 1, "", "getPatternsAsDataFrame"], [93, 2, 1, "", "getRuntime"], [93, 2, 1, "", "make2BitDatabase"], [93, 2, 1, "", "printResults"], [93, 2, 1, "", "save"], [93, 2, 1, "", "startMine"]], "PAMI.sequentialPatternMining.basic.prefixSpan": [[93, 1, 1, "", "prefixSpan"]], "PAMI.sequentialPatternMining.basic.prefixSpan.prefixSpan": [[93, 2, 1, "", "Mine"], [93, 2, 1, "", "getMemoryRSS"], [93, 2, 1, "", "getMemoryUSS"], [93, 2, 1, "", "getPatterns"], [93, 2, 1, "", "getPatternsAsDataFrame"], [93, 2, 1, "", "getRuntime"], [93, 2, 1, "", "getSameSeq"], [93, 2, 1, "", "makeNext"], [93, 2, 1, "", "makeNextSame"], [93, 2, 1, "", "makeSeqDatabaseFirst"], [93, 2, 1, "", "makeSeqDatabaseSame"], [93, 2, 1, "", "makeSupDatabase"], [93, 2, 1, "", "printResults"], [93, 2, 1, "", "save"], [93, 2, 1, "", "serchSame"], [93, 2, 1, "", "startMine"]], "PAMI.sequentialPatternMining.closed": [[94, 0, 0, "-", "abstract"], [94, 0, 0, "-", "bide"]], "PAMI.stablePeriodicFrequentPattern": [[96, 0, 0, "-", "basic"], [97, 0, 0, "-", "topK"]], "PAMI.stablePeriodicFrequentPattern.basic": [[96, 0, 0, "-", "SPPEclat"], [96, 0, 0, "-", "abstract"]], "PAMI.stablePeriodicFrequentPattern.basic.SPPEclat": [[96, 1, 1, "", "SPPEclat"]], 
"PAMI.stablePeriodicFrequentPattern.basic.SPPEclat.SPPEclat": [[96, 2, 1, "", "getMemoryRSS"], [96, 2, 1, "", "getMemoryUSS"], [96, 2, 1, "", "getPatterns"], [96, 2, 1, "", "getPatternsAsDataFrame"], [96, 2, 1, "", "getRuntime"], [96, 2, 1, "", "mine"], [96, 2, 1, "", "printResults"], [96, 2, 1, "", "save"], [96, 2, 1, "", "startMine"]], "PAMI.stablePeriodicFrequentPattern.topK": [[97, 0, 0, "-", "TSPIN"], [97, 0, 0, "-", "abstract"]], "PAMI.stablePeriodicFrequentPattern.topK.TSPIN": [[97, 1, 1, "", "TSPIN"]], "PAMI.stablePeriodicFrequentPattern.topK.TSPIN.TSPIN": [[97, 2, 1, "", "getMemoryRSS"], [97, 2, 1, "", "getMemoryUSS"], [97, 2, 1, "", "getPatterns"], [97, 2, 1, "", "getPatternsAsDataFrame"], [97, 2, 1, "", "getRuntime"], [97, 2, 1, "", "printResults"], [97, 2, 1, "", "save"], [97, 2, 1, "", "startMine"]], "PAMI.subgraphMining": [[99, 0, 0, "-", "basic"], [100, 0, 0, "-", "topK"]], "PAMI.subgraphMining.basic": [[99, 0, 0, "-", "abstract"], [99, 0, 0, "-", "dfsCode"], [99, 0, 0, "-", "edge"], [99, 0, 0, "-", "extendedEdge"], [99, 0, 0, "-", "frequentSubgraph"], [99, 0, 0, "-", "graph"], [99, 0, 0, "-", "gspan"], [99, 0, 0, "-", "sparseTriangularMatrix"], [99, 0, 0, "-", "vertex"]], "PAMI.subgraphMining.basic.dfsCode": [[99, 1, 1, "", "DFSCode"]], "PAMI.subgraphMining.basic.dfsCode.DFSCode": [[99, 2, 1, "", "add"], [99, 2, 1, "", "containEdge"], [99, 2, 1, "", "copy"], [99, 2, 1, "", "getAllVLabels"], [99, 2, 1, "", "getAt"], [99, 2, 1, "", "getEeList"], [99, 2, 1, "", "getRightMost"], [99, 2, 1, "", "getRightMostPath"], [99, 2, 1, "", "isEmpty"], [99, 2, 1, "", "notPreOfRm"], [99, 2, 1, "", "onRightMostPath"]], "PAMI.subgraphMining.basic.edge": [[99, 1, 1, "", "Edge"]], "PAMI.subgraphMining.basic.edge.Edge": [[99, 2, 1, "", "another"], [99, 2, 1, "", "getEdgeLabel"]], "PAMI.subgraphMining.basic.extendedEdge": [[99, 1, 1, "", "ExtendedEdge"]], "PAMI.subgraphMining.basic.extendedEdge.ExtendedEdge": [[99, 2, 1, "", "getEdgeLabel"], [99, 2, 1, "", "getV1"], [99, 2, 1, "", "getV2"], [99, 2, 1, "", "getVLabel1"], [99, 2, 1, "", "getVLabel2"], [99, 2, 1, "", "pairSmallerThan"], [99, 2, 1, "", "smallerThan"], [99, 2, 1, "", "smallerThanOriginal"]], "PAMI.subgraphMining.basic.frequentSubgraph": [[99, 1, 1, "", "FrequentSubgraph"]], "PAMI.subgraphMining.basic.graph": [[99, 1, 1, "", "Graph"]], "PAMI.subgraphMining.basic.graph.Graph": [[99, 3, 1, "", "emptyIntegerArray"], [99, 3, 1, "", "emptyVertexList"], [99, 2, 1, "", "findAllWithLabel"], [99, 2, 1, "", "getAllNeighbors"], [99, 2, 1, "", "getAllVertices"], [99, 2, 1, "", "getEdge"], [99, 2, 1, "", "getEdgeCount"], [99, 2, 1, "", "getEdgeLabel"], [99, 2, 1, "", "getId"], [99, 2, 1, "", "getNonPrecalculatedAllVertices"], [99, 2, 1, "", "getVLabel"], [99, 2, 1, "", "isNeighboring"], [99, 2, 1, "", "precalculateLabelsToVertices"], [99, 2, 1, "", "precalculateVertexList"], [99, 2, 1, "", "precalculateVertexNeighbors"], [99, 2, 1, "", "removeInfrequentLabel"]], "PAMI.subgraphMining.basic.gspan": [[99, 1, 1, "", "GSpan"]], "PAMI.subgraphMining.basic.gspan.GSpan": [[99, 1, 1, "", "Pair"], [99, 3, 1, "", "edge_count_pruning"], [99, 3, 1, "", "eliminate_infrequent_edge_labels"], [99, 3, 1, "", "eliminate_infrequent_vertex_pairs"], [99, 3, 1, "", "eliminate_infrequent_vertices"], [99, 2, 1, "", "findAllOnlyOneVertex"], [99, 2, 1, "", "gSpan"], [99, 2, 1, "", "getFrequentSubgraphs"], [99, 2, 1, "", "getMemoryRSS"], [99, 2, 1, "", "getMemoryUSS"], [99, 2, 1, "", "getRuntime"], [99, 2, 1, "", "gspanDFS"], [99, 2, 1, "", "isCanonical"], [99, 2, 1, "", 
"readGraphs"], [99, 2, 1, "", "removeInfrequentVertexPairs"], [99, 2, 1, "", "rightMostPathExtensions"], [99, 2, 1, "", "rightMostPathExtensionsFromSingle"], [99, 2, 1, "", "save"], [99, 2, 1, "", "startMine"], [99, 2, 1, "", "subgraphIsomorphisms"]], "PAMI.subgraphMining.basic.sparseTriangularMatrix": [[99, 1, 1, "", "SparseTriangularMatrix"]], "PAMI.subgraphMining.basic.sparseTriangularMatrix.SparseTriangularMatrix": [[99, 2, 1, "", "getSupportForItems"], [99, 2, 1, "", "incrementCount"], [99, 2, 1, "", "removeInfrequentEntriesFromMatrix"], [99, 2, 1, "", "setSupport"]], "PAMI.subgraphMining.basic.vertex": [[99, 1, 1, "", "Vertex"]], "PAMI.subgraphMining.basic.vertex.Vertex": [[99, 2, 1, "", "addEdge"], [99, 2, 1, "", "getEdgeList"], [99, 2, 1, "", "getId"], [99, 2, 1, "", "getLabel"], [99, 2, 1, "", "removeEdge"]], "PAMI.subgraphMining.topK": [[100, 0, 0, "-", "DFSCode"], [100, 0, 0, "-", "DFSThread"], [100, 0, 0, "-", "abstract"], [100, 0, 0, "-", "edge"], [100, 0, 0, "-", "extendedEdge"], [100, 0, 0, "-", "frequentSubgraph"], [100, 0, 0, "-", "graph"], [100, 0, 0, "-", "sparseTriangularMatrix"], [100, 0, 0, "-", "tkg"], [100, 0, 0, "-", "vertex"]], "PAMI.subgraphMining.topK.DFSCode": [[100, 1, 1, "", "DfsCode"]], "PAMI.subgraphMining.topK.DFSCode.DfsCode": [[100, 2, 1, "", "add"], [100, 2, 1, "", "containEdge"], [100, 2, 1, "", "copy"], [100, 2, 1, "", "getAllVLabels"], [100, 2, 1, "", "getAt"], [100, 2, 1, "", "getEeList"], [100, 2, 1, "", "getRightMost"], [100, 2, 1, "", "getRightMostPath"], [100, 2, 1, "", "isEmpty"], [100, 2, 1, "", "notPreOfRm"], [100, 2, 1, "", "onRightMostPath"]], "PAMI.subgraphMining.topK.DFSThread": [[100, 1, 1, "", "DfsThread"]], "PAMI.subgraphMining.topK.DFSThread.DfsThread": [[100, 2, 1, "", "run"]], "PAMI.subgraphMining.topK.edge": [[100, 1, 1, "", "Edge"]], "PAMI.subgraphMining.topK.edge.Edge": [[100, 2, 1, "", "another"], [100, 2, 1, "", "getEdgeLabel"]], "PAMI.subgraphMining.topK.extendedEdge": [[100, 1, 1, "", "ExtendedEdge"]], "PAMI.subgraphMining.topK.extendedEdge.ExtendedEdge": [[100, 2, 1, "", "getEdgeLabel"], [100, 2, 1, "", "getV1"], [100, 2, 1, "", "getV2"], [100, 2, 1, "", "getVLabel1"], [100, 2, 1, "", "getVLabel2"], [100, 2, 1, "", "pairSmallerThan"], [100, 2, 1, "", "smallerThan"], [100, 2, 1, "", "smallerThanOriginal"]], "PAMI.subgraphMining.topK.frequentSubgraph": [[100, 1, 1, "", "FrequentSubgraph"]], "PAMI.subgraphMining.topK.graph": [[100, 1, 1, "", "Graph"]], "PAMI.subgraphMining.topK.graph.Graph": [[100, 3, 1, "", "EMPTY_INTEGER_ARRAY"], [100, 3, 1, "", "EMPTY_VERTEX_LIST"], [100, 2, 1, "", "findAllWithLabel"], [100, 2, 1, "", "getAllNeighbors"], [100, 2, 1, "", "getAllVertices"], [100, 2, 1, "", "getEdge"], [100, 2, 1, "", "getEdgeCount"], [100, 2, 1, "", "getEdgeLabel"], [100, 2, 1, "", "getId"], [100, 2, 1, "", "getNonPrecalculatedAllVertices"], [100, 2, 1, "", "getVLabel"], [100, 2, 1, "", "isNeighboring"], [100, 2, 1, "", "precalculateLabelsToVertices"], [100, 2, 1, "", "precalculateVertexList"], [100, 2, 1, "", "precalculateVertexNeighbors"], [100, 2, 1, "", "removeInfrequentLabel"]], "PAMI.subgraphMining.topK.sparseTriangularMatrix": [[100, 1, 1, "", "SparseTriangularMatrix"]], "PAMI.subgraphMining.topK.sparseTriangularMatrix.SparseTriangularMatrix": [[100, 2, 1, "", "getSupportForItems"], [100, 2, 1, "", "incrementCount"], [100, 2, 1, "", "removeInfrequentEntriesFromMatrix"], [100, 2, 1, "", "setSupport"]], "PAMI.subgraphMining.topK.tkg": [[100, 1, 1, "", "TKG"]], "PAMI.subgraphMining.topK.tkg.TKG": [[100, 3, 1, "", 
"DYNAMIC_SEARCH"], [100, 3, 1, "", "EDGE_COUNT_PRUNING"], [100, 3, 1, "", "ELIMINATE_INFREQUENT_EDGE_LABELS"], [100, 3, 1, "", "ELIMINATE_INFREQUENT_VERTEX_PAIRS"], [100, 3, 1, "", "ELIMINATE_INFREQUENT_VERTICES"], [100, 1, 1, "", "Pair"], [100, 3, 1, "", "THREADED_DYNAMIC_SEARCH"], [100, 2, 1, "", "findAllOnlyOneVertex"], [100, 2, 1, "", "gSpan"], [100, 2, 1, "", "getKSubgraphs"], [100, 2, 1, "", "getMemoryRSS"], [100, 2, 1, "", "getMemoryUSS"], [100, 2, 1, "", "getMinSupport"], [100, 2, 1, "", "getQueueSize"], [100, 2, 1, "", "getRuntime"], [100, 2, 1, "", "getSubgraphs"], [100, 2, 1, "", "gspanDfs"], [100, 2, 1, "", "gspanDynamicDFS"], [100, 2, 1, "", "isCanonical"], [100, 2, 1, "", "readGraphs"], [100, 2, 1, "", "registerAsCandidate"], [100, 2, 1, "", "removeInfrequentVertexPairs"], [100, 2, 1, "", "rightMostPathExtensions"], [100, 2, 1, "", "rightMostPathExtensionsFromSingle"], [100, 2, 1, "", "save"], [100, 2, 1, "", "savePattern"], [100, 2, 1, "", "startMine"], [100, 2, 1, "", "startThreads"], [100, 2, 1, "", "subgraphIsomorphisms"]], "PAMI.subgraphMining.topK.vertex": [[100, 1, 1, "", "Vertex"]], "PAMI.subgraphMining.topK.vertex.Vertex": [[100, 2, 1, "", "addEdge"], [100, 2, 1, "", "getEdgeList"], [100, 2, 1, "", "getId"], [100, 2, 1, "", "getLabel"], [100, 2, 1, "", "removeEdge"]], "PAMI.uncertainFaultTolerantFrequentPattern": [[101, 0, 0, "-", "VBFTMine"], [101, 0, 0, "-", "abstract"]], "PAMI.uncertainFaultTolerantFrequentPattern.VBFTMine": [[101, 1, 1, "", "VBFTMine"]], "PAMI.uncertainFaultTolerantFrequentPattern.VBFTMine.VBFTMine": [[101, 2, 1, "", "getMemoryRSS"], [101, 2, 1, "", "getMemoryUSS"], [101, 2, 1, "", "getPatterns"], [101, 2, 1, "", "getPatternsAsDataFrame"], [101, 2, 1, "", "getRuntime"], [101, 2, 1, "", "mine"], [101, 2, 1, "", "printResults"], [101, 2, 1, "", "save"], [101, 2, 1, "", "startMine"]], "PAMI.uncertainFrequentPattern": [[103, 0, 0, "-", "basic"]], "PAMI.uncertainFrequentPattern.basic": [[103, 0, 0, "-", "CUFPTree"], [103, 0, 0, "-", "abstract"]], "PAMI.uncertainFrequentPattern.basic.CUFPTree": [[103, 1, 1, "", "CUFPTree"]], "PAMI.uncertainFrequentPattern.basic.CUFPTree.CUFPTree": [[103, 2, 1, "", "getMemoryRSS"], [103, 2, 1, "", "getMemoryUSS"], [103, 2, 1, "", "getPatterns"], [103, 2, 1, "", "getPatternsAsDataFrame"], [103, 2, 1, "", "getRuntime"], [103, 2, 1, "", "mine"], [103, 2, 1, "", "printResults"], [103, 2, 1, "", "save"], [103, 2, 1, "", "startMine"]], "PAMI.uncertainGeoreferencedFrequentPattern": [[105, 0, 0, "-", "basic"]], "PAMI.uncertainGeoreferencedFrequentPattern.basic": [[105, 0, 0, "-", "GFPGrowth"], [105, 0, 0, "-", "abstract"]], "PAMI.uncertainGeoreferencedFrequentPattern.basic.GFPGrowth": [[105, 1, 1, "", "GFPGrowth"]], "PAMI.uncertainGeoreferencedFrequentPattern.basic.GFPGrowth.GFPGrowth": [[105, 2, 1, "", "getMemoryRSS"], [105, 2, 1, "", "getMemoryUSS"], [105, 2, 1, "", "getPatterns"], [105, 2, 1, "", "getPatternsAsDataFrame"], [105, 2, 1, "", "getRuntime"], [105, 2, 1, "", "mine"], [105, 2, 1, "", "printResults"], [105, 2, 1, "", "save"], [105, 2, 1, "", "startMine"]], "PAMI.uncertainPeriodicFrequentPattern": [[107, 0, 0, "-", "basic"]], "PAMI.uncertainPeriodicFrequentPattern.basic": [[107, 0, 0, "-", "UPFPGrowth"], [107, 0, 0, "-", "UPFPGrowthPlus"], [107, 0, 0, "-", "abstract"]], "PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowth": [[107, 1, 1, "", "UPFPGrowth"]], "PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowth.UPFPGrowth": [[107, 2, 1, "", "getMemoryRSS"], [107, 2, 1, "", "getMemoryUSS"], [107, 2, 1, "", 
"getPatterns"], [107, 2, 1, "", "getPatternsAsDataFrame"], [107, 2, 1, "", "getRuntime"], [107, 2, 1, "", "mine"], [107, 2, 1, "", "printResults"], [107, 2, 1, "", "save"], [107, 2, 1, "", "startMine"]], "PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowthPlus": [[107, 1, 1, "", "UPFPGrowthPlus"], [107, 4, 1, "", "printTree"]], "PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowthPlus.UPFPGrowthPlus": [[107, 2, 1, "", "getMemoryRSS"], [107, 2, 1, "", "getMemoryUSS"], [107, 2, 1, "", "getPatterns"], [107, 2, 1, "", "getPatternsAsDataFrame"], [107, 2, 1, "", "getRuntime"], [107, 2, 1, "", "mine"], [107, 2, 1, "", "printResults"], [107, 2, 1, "", "save"], [107, 2, 1, "", "startMine"]], "PAMI.weightedFrequentNeighbourhoodPattern": [[109, 0, 0, "-", "basic"]], "PAMI.weightedFrequentNeighbourhoodPattern.basic": [[109, 0, 0, "-", "SWFPGrowth"], [109, 0, 0, "-", "abstract"]], "PAMI.weightedFrequentNeighbourhoodPattern.basic.SWFPGrowth": [[109, 1, 1, "", "SWFPGrowth"]], "PAMI.weightedFrequentNeighbourhoodPattern.basic.SWFPGrowth.SWFPGrowth": [[109, 2, 1, "", "getMemoryRSS"], [109, 2, 1, "", "getMemoryUSS"], [109, 2, 1, "", "getPatterns"], [109, 2, 1, "", "getPatternsAsDataFrame"], [109, 2, 1, "", "getRuntime"], [109, 2, 1, "", "mine"], [109, 2, 1, "", "printResults"], [109, 2, 1, "", "save"], [109, 2, 1, "", "startMine"]], "PAMI.weightedFrequentPattern": [[111, 0, 0, "-", "basic"]], "PAMI.weightedFrequentPattern.basic": [[111, 0, 0, "-", "WFIM"], [111, 0, 0, "-", "abstract"]], "PAMI.weightedFrequentPattern.basic.WFIM": [[111, 1, 1, "", "WFIM"]], "PAMI.weightedFrequentPattern.basic.WFIM.WFIM": [[111, 2, 1, "", "getMemoryRSS"], [111, 2, 1, "", "getMemoryUSS"], [111, 2, 1, "", "getPatterns"], [111, 2, 1, "", "getPatternsAsDataFrame"], [111, 2, 1, "", "getRuntime"], [111, 2, 1, "", "mine"], [111, 2, 1, "", "printResults"], [111, 2, 1, "", "save"], [111, 2, 1, "", "startMine"]], "PAMI.weightedFrequentRegularPattern": [[113, 0, 0, "-", "basic"]], "PAMI.weightedFrequentRegularPattern.basic": [[113, 0, 0, "-", "WFRIMiner"], [113, 0, 0, "-", "abstract"]], "PAMI.weightedFrequentRegularPattern.basic.WFRIMiner": [[113, 1, 1, "", "WFRIMiner"]], "PAMI.weightedFrequentRegularPattern.basic.WFRIMiner.WFRIMiner": [[113, 2, 1, "", "getMemoryRSS"], [113, 2, 1, "", "getMemoryUSS"], [113, 2, 1, "", "getPatterns"], [113, 2, 1, "", "getPatternsAsDataFrame"], [113, 2, 1, "", "getRuntime"], [113, 2, 1, "", "mine"], [113, 2, 1, "", "printResults"], [113, 2, 1, "", "save"], [113, 2, 1, "", "startMine"]], "PAMI.weightedUncertainFrequentPattern": [[115, 0, 0, "-", "basic"]], "PAMI.weightedUncertainFrequentPattern.basic": [[115, 0, 0, "-", "WUFIM"], [115, 0, 0, "-", "abstract"]], "PAMI.weightedUncertainFrequentPattern.basic.WUFIM": [[115, 1, 1, "", "WUFIM"]], "PAMI.weightedUncertainFrequentPattern.basic.WUFIM.WUFIM": [[115, 2, 1, "", "getMemoryRSS"], [115, 2, 1, "", "getMemoryUSS"], [115, 2, 1, "", "getPatterns"], [115, 2, 1, "", "getPatternsAsDataFrame"], [115, 2, 1, "", "getRuntime"], [115, 2, 1, "", "mine"], [115, 2, 1, "", "printResults"], [115, 2, 1, "", "save"], [115, 2, 1, "", "startMine"]]}, "objtypes": {"0": "py:module", "1": "py:class", "2": "py:method", "3": "py:attribute", "4": "py:function"}, "objnames": {"0": ["py", "module", "Python module"], "1": ["py", "class", "Python class"], "2": ["py", "method", "Python method"], "3": ["py", "attribute", "Python attribute"], "4": ["py", "function", "Python function"]}, "titleterms": {"pami": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 
22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117], "packag": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115], "subpackag": [0, 1, 3, 5, 7, 22, 24, 31, 33, 35, 37, 39, 41, 43, 45, 48, 50, 52, 54, 58, 61, 63, 65, 67, 74, 76, 82, 85, 87, 89, 92, 95, 98, 102, 104, 106, 108, 110, 112, 114], "modul": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115], "content": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116], "associationrul": [1, 2], "basic": [2, 4, 6, 23, 25, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 62, 64, 66, 68, 75, 77, 86, 88, 90, 93, 96, 99, 103, 105, 107, 109, 111, 113, 115], "submodul": [2, 4, 6, 7, 8, 9, 10, 11, 12, 13, 15, 16, 17, 19, 20, 21, 23, 25, 26, 27, 28, 29, 30, 32, 34, 36, 38, 40, 42, 44, 46, 47, 49, 51, 53, 55, 56, 57, 58, 59, 60, 62, 64, 66, 68, 69, 70, 71, 72, 73, 75, 77, 78, 79, 80, 81, 83, 84, 86, 88, 90, 93, 94, 96, 97, 99, 100, 101, 103, 105, 107, 109, 111, 113, 115], "arwithconfid": 2, "about": [2, 4, 6, 73, 78], "thi": [2, 4, 6, 7, 8, 9, 10, 11, 12, 13, 15, 17, 19, 20, 23, 25, 26, 28, 30, 73, 77, 78, 86, 88, 90, 93, 96, 97, 103, 107, 109, 111, 113, 115], "algorithm": [2, 4, 6, 7, 8, 9, 10, 11, 12, 13, 15, 17, 19, 20, 23, 25, 26, 28, 30, 73, 77, 78, 86, 88, 90, 93, 96, 97, 103, 107, 109, 111, 113, 115], "execut": [2, 4, 6, 10, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "method": [2, 4, 6, 20, 23, 25, 26, 28, 30, 73, 77, 78, 86, 88, 90, 93, 96, 97, 103, 109, 111, 113, 115], "credit": [2, 4, 6, 10, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 78, 80, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113], "arwithleverag": 2, "arwithlift": 2, "rulemin": 2, "abstract": [2, 4, 6, 11, 23, 25, 26, 27, 28, 29, 30, 32, 34, 36, 38, 40, 42, 44, 46, 
47, 49, 51, 53, 55, 56, 57, 58, 59, 60, 62, 64, 66, 68, 69, 70, 71, 72, 73, 75, 77, 78, 79, 80, 81, 83, 84, 86, 88, 90, 93, 94, 96, 97, 99, 100, 101, 103, 105, 107, 109, 111, 113, 115], "correlatedpattern": [3, 4], "comin": 4, "comineplu": 4, "coveragepattern": [5, 6], "cmine": 6, "cppg": 6, "code": [6, 10, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 80, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "termin": [6, 10, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 72, 75, 77, 80, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "import": [6, 7, 8, 9, 10, 11, 12, 13, 15, 17, 19, 20, 23, 25, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 72, 73, 75, 77, 80, 84, 86, 88, 90, 93, 96, 97, 101, 103, 105, 107, 109, 111, 113, 115], "python": [6, 7, 8, 9, 10, 11, 12, 13, 15, 17, 19, 20, 23, 25, 26, 28, 30, 77, 86, 88, 90, 93, 96, 97, 103, 107, 109, 111, 113, 115], "program": [6, 7, 8, 9, 10, 11, 12, 13, 15, 17, 19, 20, 23, 25, 26, 28, 30, 77, 86, 88, 90, 93, 96, 97, 103, 107, 109, 111, 113, 115], "extra": [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21], "convertmultitsintofuzzi": 7, "generatelatexgraphfil": 7, "plotpointonmap": 7, "plotpointonmap_dump": 7, "scatterplotspatialpoint": 7, "topkpattern": 7, "uncertaindb_convert": 7, "df2db": 8, "df2dbplu": 8, "denseformatdf": 8, "sparseformatdf": 8, "createtdb": 8, "densedf2dbplu": 8, "densedf2db_dump": 8, "sparsedf2dbplu": 8, "calculatemisvalu": 9, "usingbeta": 9, "usingsd": 9, "dbstat": 10, "fuzzydatabas": [10, 20], "multipletimeseriesfuzzydatabasestat": 10, "sequentialdatabas": [10, 19], "sampl": [10, 19, 23, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 72, 73, 75, 80, 84, 93, 101, 105], "run": [10, 19, 23, 32, 34, 36, 38, 40, 42, 44, 46, 49, 51, 53, 55, 59, 60, 62, 64, 68, 69, 72, 73, 75, 80, 84, 93, 101, 105], "temporaldatabas": [10, 19, 20], "transactionaldatabas": [10, 19, 20], "uncertaintemporaldatabas": 10, "uncertaintransactionaldatabas": 10, "utilitydatabas": [10, 19, 20], "fuzzytransform": 11, "temporaltofuzzi": 11, "transactionaltofuzzi": 11, "utilitytofuzzi": 11, "generatedatabas": 12, "generatespatiotemporaldatabas": 12, "generatetemporaldatabas": 12, "generatetransactionaldatabas": 12, "graph": [13, 21, 99, 100], "df2fig": 13, "df2tex": 13, "plotlinegraphfromdictionari": 13, "plotlinegraphsfromdatafram": 13, "visualizefuzzypattern": 13, "visualizepattern": 13, "image2databas": 14, "imageprocess": 15, "imagery2databas": 15, "messag": 16, "discord": 16, "gmail": 16, "neighbour": 17, "findneighborsusingeuclideandistanceforpointinfo": 17, "findneighboursusingeuclidean": 17, "findneighboursusinggeodes": 17, "sampledataset": 18, "stat": 19, "graphdatabas": 19, "syntheticdatagener": 20, "createsyntheticgeoreferentialtempor": 20, "createsyntheticgeoreferentialtransact": 20, "createsyntheticgeoreferentialuncertaintransact": 20, "createsynthetictempor": 20, "createsynthetictransact": 20, "createsyntheticuncertaintempor": 20, "createsyntheticuncertaintransact": 20, "createsyntheticutil": 20, "generatetempor": 20, "generatetransact": 20, "generateuncertaintempor": 20, "generateuncertaintransact": 20, "generateutilitytempor": 20, "generateutilitytransact": 20, "georeferencedtemporaldatabas": 20, "georeferencedtransactionaldatabas": 20, "syntheticutilitydatabas": 20, "temporaldatabasegen": 20, "visual": 21, 
"faulttolerantfrequentpattern": [22, 23], "ftapriori": 23, "ftfpgrowth": 23, "frequentpattern": [24, 25, 26, 27, 28, 29, 30], "apriori": 25, "eclat": 25, "eclatdiffset": 25, "eclatbitset": 25, "fpgrowth": 25, "close": [26, 69, 78, 94], "charm": 26, "cuda": [27, 79], "cuapriori": 27, "cuaprioribit": 27, "cueclat": 27, "cueclatbit": 27, "cudaapriorigct": 27, "cudaaprioritid": 27, "cudaeclatgct": 27, "maxim": [28, 70, 80], "maxfpgrowth": 28, "pyspark": [29, 71, 81], "parallelapriori": 29, "paralleleclat": 29, "parallelfpgrowth": 29, "topk": [30, 60, 72, 82, 83, 84, 97, 100], "fae": 30, "fuzzycorrelatedpattern": [31, 32], "fcpgrowth": 32, "fuzzyfrequentpattern": [33, 34], "ffimin": 34, "ffiminer_old": 34, "fuzzygeoreferencedfrequentpattern": [35, 36], "ffspminer": 36, "ffspminer_old": 36, "fuzzygeoreferencedperiodicfrequentpattern": [37, 38], "fgpfpminer": 38, "fgpfpminer_old": 38, "fuzzypartialperiodicpattern": [39, 40], "f3pminer": 40, "fuzzyperiodicfrequentpattern": [41, 42], "fpfpminer": 42, "fpfpminer_old": 42, "georeferencedperiodicfrequentpattern": [43, 44], "gpfpminer": 44, "georeferencedfrequentpattern": [45, 46], "fspgrowth": 46, "spatialeclat": 46, "georeferencedfrequentsequencepattern": 47, "georeferencedpartialperiodicpattern": [48, 49], "steclat": 49, "highutilityfrequentpattern": [50, 51], "hufim": 51, "highutilitygeoreferencedfrequentpattern": [52, 53], "shufim": 53, "highutilitypattern": [54, 55, 56], "efim": 55, "hminer": 55, "upgrowth": 55, "efimparallel": [55, 56], "parallel": 56, "highutilitypatternsinstream": 57, "hupm": 57, "shugrowth": 57, "highutilityspatialpattern": [58, 59, 60], "hdshuim": 59, "shuim": 59, "tkshuim": 60, "localperiodicpattern": [61, 62], "lppgrowth": 62, "lppmbreadth": 62, "lppmdepth": 62, "multipleminimumsupportbasedfrequentpattern": [63, 64], "cfpgrowth": 64, "cfpgrowthplu": 64, "partialperiodicfrequentpattern": [65, 66], "gpfgrowth": 66, "ppf_df": 66, "partialperiodicpattern": [67, 68, 69, 70, 71, 72], "gthreepgrowth": 68, "gabstract": 68, "pppgrowth": 68, "ppp_eclat": 68, "pppclose": 69, "max3pgrowth": 70, "parallel3pgrowth": 71, "k3pminer": 72, "partialperiodicpatterninmultipletimeseri": 73, "ppgrowth": 73, "periodiccorrelatedpattern": [74, 75], "epcpgrowth": 75, "periodicfrequentpattern": [76, 77, 78, 79, 80, 81, 82, 83, 84], "pfeclat": 77, "pfpgrowth": 77, "pfpgrowthplu": 77, "pfpmc": 77, "psgrowth": 77, "parallelpfpgrowth": [77, 81], "cpfpminer": 78, "cugpfmin": 79, "gpfminerbit": 79, "maxpfgrowth": 80, "topkpfp": 83, "kpfpminer": 84, "recurringpattern": [85, 86], "rpgrowth": 86, "relativefrequentpattern": [87, 88], "rsfpgrowth": 88, "relativehighutilitypattern": [89, 90], "rhuim": 90, "sequenc": 91, "sequentialpatternmin": [92, 93, 94], "spade": 93, "spam": 93, "prefixspan": 93, "bide": 94, "stableperiodicfrequentpattern": [95, 96, 97], "sppeclat": 96, "sppgrowth": 96, "sppgrowthdump": 96, "tspin": 97, "subgraphmin": [98, 99, 100], "dfscode": [99, 100], "edg": [99, 100], "extendededg": [99, 100], "frequentsubgraph": [99, 100], "gspan": 99, "sparsetriangularmatrix": [99, 100], "vertex": [99, 100], "dfsthread": 100, "tkg": 100, "uncertainfaulttolerantfrequentpattern": 101, "vbftmine": 101, "uncertainfrequentpattern": [102, 103], "cufptre": 103, "pufgrowth": 103, "tufp": 103, "tubep": 103, "tube": 103, "ufgrowth": 103, "uveclat": 103, "uncertaingeoreferencedfrequentpattern": [104, 105], "gfpgrowth": 105, "uncertainperiodicfrequentpattern": [106, 107], "upfpgrowth": 107, "upfpgrowthplu": 107, "weightedfrequentneighbourhoodpattern": [108, 109], 
"swfpgrowth": 109, "weightedfrequentpattern": [110, 111], "wfim": 111, "weightedfrequentregularpattern": [112, 113], "wfrimin": 113, "weighteduncertainfrequentpattern": [114, 115], "wufim": 115, "welcom": 116, "": 116, "document": 116, "indic": 116, "tabl": 116}, "envversion": {"sphinx.domains.c": 3, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 9, "sphinx.domains.index": 1, "sphinx.domains.javascript": 3, "sphinx.domains.math": 2, "sphinx.domains.python": 4, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx.ext.viewcode": 1, "sphinx.ext.intersphinx": 1, "sphinx": 60}, "alltitles": {"PAMI package": [[0, "pami-package"]], "Subpackages": [[0, "subpackages"], [1, "subpackages"], [3, "subpackages"], [5, "subpackages"], [7, "subpackages"], [22, "subpackages"], [24, "subpackages"], [31, "subpackages"], [33, "subpackages"], [35, "subpackages"], [37, "subpackages"], [39, "subpackages"], [41, "subpackages"], [43, "subpackages"], [45, "subpackages"], [48, "subpackages"], [50, "subpackages"], [52, "subpackages"], [54, "subpackages"], [58, "subpackages"], [61, "subpackages"], [63, "subpackages"], [65, "subpackages"], [67, "subpackages"], [74, "subpackages"], [76, "subpackages"], [82, "subpackages"], [85, "subpackages"], [87, "subpackages"], [89, "subpackages"], [92, "subpackages"], [95, "subpackages"], [98, "subpackages"], [102, "subpackages"], [104, "subpackages"], [106, "subpackages"], [108, "subpackages"], [110, "subpackages"], [112, "subpackages"], [114, "subpackages"]], "Module contents": [[0, "module-PAMI"], [1, "module-PAMI.AssociationRules"], [2, "module-PAMI.AssociationRules.basic"], [3, "module-PAMI.correlatedPattern"], [4, "module-PAMI.correlatedPattern.basic"], [5, "module-PAMI.coveragePattern"], [6, "module-PAMI.coveragePattern.basic"], [7, "module-PAMI.extras"], [8, "module-PAMI.extras.DF2DB"], [9, "module-PAMI.extras.calculateMISValues"], [10, "module-PAMI.extras.dbStats"], [11, "module-PAMI.extras.fuzzyTransformation"], [12, "module-PAMI.extras.generateDatabase"], [13, "module-PAMI.extras.graph"], [14, "module-PAMI.extras.image2Database"], [15, "module-PAMI.extras.imageProcessing"], [16, "module-PAMI.extras.messaging"], [17, "module-PAMI.extras.neighbours"], [18, "module-PAMI.extras.sampleDatasets"], [19, "module-PAMI.extras.stats"], [20, "module-PAMI.extras.syntheticDataGenerator"], [21, "module-PAMI.extras.visualize"], [22, "module-PAMI.faultTolerantFrequentPattern"], [23, "module-PAMI.faultTolerantFrequentPattern.basic"], [24, "module-PAMI.frequentPattern"], [25, "module-PAMI.frequentPattern.basic"], [26, "module-PAMI.frequentPattern.closed"], [27, "module-PAMI.frequentPattern.cuda"], [28, "module-PAMI.frequentPattern.maximal"], [29, "module-PAMI.frequentPattern.pyspark"], [30, "module-PAMI.frequentPattern.topk"], [31, "module-PAMI.fuzzyCorrelatedPattern"], [32, "module-PAMI.fuzzyCorrelatedPattern.basic"], [33, "module-PAMI.fuzzyFrequentPattern"], [34, "module-PAMI.fuzzyFrequentPattern.basic"], [35, "module-PAMI.fuzzyGeoreferencedFrequentPattern"], [36, "module-PAMI.fuzzyGeoreferencedFrequentPattern.basic"], [37, "module-PAMI.fuzzyGeoreferencedPeriodicFrequentPattern"], [38, "module-PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic"], [39, "module-PAMI.fuzzyPartialPeriodicPatterns"], [40, "module-PAMI.fuzzyPartialPeriodicPatterns.basic"], [41, "module-PAMI.fuzzyPeriodicFrequentPattern"], [42, "module-PAMI.fuzzyPeriodicFrequentPattern.basic"], [43, "module-PAMI.geoReferencedPeriodicFrequentPattern"], [44, 
"module-PAMI.geoReferencedPeriodicFrequentPattern.basic"], [45, "module-PAMI.georeferencedFrequentPattern"], [46, "module-PAMI.georeferencedFrequentPattern.basic"], [47, "module-PAMI.georeferencedFrequentSequencePattern"], [48, "module-PAMI.georeferencedPartialPeriodicPattern"], [49, "module-PAMI.georeferencedPartialPeriodicPattern.basic"], [50, "module-PAMI.highUtilityFrequentPattern"], [51, "module-PAMI.highUtilityFrequentPattern.basic"], [52, "module-PAMI.highUtilityGeoreferencedFrequentPattern"], [53, "module-PAMI.highUtilityGeoreferencedFrequentPattern.basic"], [54, "module-PAMI.highUtilityPattern"], [55, "module-PAMI.highUtilityPattern.basic"], [56, "module-PAMI.highUtilityPattern.parallel"], [57, "module-PAMI.highUtilityPatternsInStreams"], [58, "module-PAMI.highUtilitySpatialPattern"], [59, "module-PAMI.highUtilitySpatialPattern.basic"], [60, "module-PAMI.highUtilitySpatialPattern.topk"], [61, "module-PAMI.localPeriodicPattern"], [62, "module-PAMI.localPeriodicPattern.basic"], [63, "module-PAMI.multipleMinimumSupportBasedFrequentPattern"], [64, "module-PAMI.multipleMinimumSupportBasedFrequentPattern.basic"], [65, "module-PAMI.partialPeriodicFrequentPattern"], [66, "module-PAMI.partialPeriodicFrequentPattern.basic"], [67, "module-PAMI.partialPeriodicPattern"], [68, "module-PAMI.partialPeriodicPattern.basic"], [69, "module-PAMI.partialPeriodicPattern.closed"], [70, "module-PAMI.partialPeriodicPattern.maximal"], [71, "module-PAMI.partialPeriodicPattern.pyspark"], [72, "module-PAMI.partialPeriodicPattern.topk"], [73, "module-PAMI.partialPeriodicPatternInMultipleTimeSeries"], [74, "module-PAMI.periodicCorrelatedPattern"], [75, "module-PAMI.periodicCorrelatedPattern.basic"], [76, "module-PAMI.periodicFrequentPattern"], [77, "module-PAMI.periodicFrequentPattern.basic"], [78, "module-PAMI.periodicFrequentPattern.closed"], [79, "module-PAMI.periodicFrequentPattern.cuda"], [80, "module-PAMI.periodicFrequentPattern.maximal"], [81, "module-PAMI.periodicFrequentPattern.pyspark"], [82, "module-PAMI.periodicFrequentPattern.topk"], [83, "module-PAMI.periodicFrequentPattern.topk.TopkPFP"], [84, "module-PAMI.periodicFrequentPattern.topk.kPFPMiner"], [85, "module-PAMI.recurringPattern"], [86, "module-PAMI.recurringPattern.basic"], [87, "module-PAMI.relativeFrequentPattern"], [88, "module-PAMI.relativeFrequentPattern.basic"], [89, "module-PAMI.relativeHighUtilityPattern"], [90, "module-PAMI.relativeHighUtilityPattern.basic"], [91, "module-PAMI.sequence"], [92, "module-PAMI.sequentialPatternMining"], [93, "module-PAMI.sequentialPatternMining.basic"], [94, "module-PAMI.sequentialPatternMining.closed"], [95, "module-PAMI.stablePeriodicFrequentPattern"], [96, "module-PAMI.stablePeriodicFrequentPattern.basic"], [97, "module-PAMI.stablePeriodicFrequentPattern.topK"], [98, "module-PAMI.subgraphMining"], [99, "module-PAMI.subgraphMining.basic"], [100, "module-PAMI.subgraphMining.topK"], [101, "module-PAMI.uncertainFaultTolerantFrequentPattern"], [102, "module-PAMI.uncertainFrequentPattern"], [103, "module-PAMI.uncertainFrequentPattern.basic"], [104, "module-PAMI.uncertainGeoreferencedFrequentPattern"], [105, "module-PAMI.uncertainGeoreferencedFrequentPattern.basic"], [106, "module-PAMI.uncertainPeriodicFrequentPattern"], [107, "module-PAMI.uncertainPeriodicFrequentPattern.basic"], [108, "module-PAMI.weightedFrequentNeighbourhoodPattern"], [109, "module-PAMI.weightedFrequentNeighbourhoodPattern.basic"], [110, "module-PAMI.weightedFrequentPattern"], [111, "module-PAMI.weightedFrequentPattern.basic"], [112, 
"module-PAMI.weightedFrequentRegularPattern"], [113, "module-PAMI.weightedFrequentRegularPattern.basic"], [114, "module-PAMI.weightedUncertainFrequentPattern"], [115, "module-PAMI.weightedUncertainFrequentPattern.basic"]], "PAMI.AssociationRules package": [[1, "pami-associationrules-package"]], "PAMI.AssociationRules.basic package": [[2, "pami-associationrules-basic-package"]], "Submodules": [[2, "submodules"], [4, "submodules"], [6, "submodules"], [7, "submodules"], [8, "submodules"], [9, "submodules"], [10, "submodules"], [11, "submodules"], [12, "submodules"], [13, "submodules"], [15, "submodules"], [16, "submodules"], [17, "submodules"], [19, "submodules"], [20, "submodules"], [21, "submodules"], [23, "submodules"], [25, "submodules"], [26, "submodules"], [27, "submodules"], [28, "submodules"], [29, "submodules"], [30, "submodules"], [32, "submodules"], [34, "submodules"], [36, "submodules"], [38, "submodules"], [40, "submodules"], [42, "submodules"], [44, "submodules"], [46, "submodules"], [47, "submodules"], [49, "submodules"], [51, "submodules"], [53, "submodules"], [55, "submodules"], [56, "submodules"], [57, "submodules"], [58, "submodules"], [59, "submodules"], [60, "submodules"], [62, "submodules"], [64, "submodules"], [66, "submodules"], [68, "submodules"], [69, "submodules"], [70, "submodules"], [71, "submodules"], [72, "submodules"], [73, "submodules"], [75, "submodules"], [77, "submodules"], [78, "submodules"], [79, "submodules"], [80, "submodules"], [81, "submodules"], [83, "submodules"], [84, "submodules"], [86, "submodules"], [88, "submodules"], [90, "submodules"], [93, "submodules"], [94, "submodules"], [96, "submodules"], [97, "submodules"], [99, "submodules"], [100, "submodules"], [101, "submodules"], [103, "submodules"], [105, "submodules"], [107, "submodules"], [109, "submodules"], [111, "submodules"], [113, "submodules"], [115, "submodules"]], "PAMI.AssociationRules.basic.ARWithConfidence module": [[2, "module-PAMI.AssociationRules.basic.ARWithConfidence"]], "About this algorithm": [[2, "about-this-algorithm"], [2, "id1"], [2, "id4"], [2, "id7"], [4, "about-this-algorithm"], [4, "id1"], [6, "about-this-algorithm"], [73, "about-this-algorithm"], [78, "about-this-algorithm"]], "Execution methods": [[2, "execution-methods"], [2, "id2"], [2, "id5"], [2, "id8"], [4, "execution-methods"], [4, "id2"], [6, "execution-methods"], [73, "execution-methods"], [78, "execution-methods"]], "Credits": [[2, "credits"], [2, "id3"], [2, "id6"], [4, "credits"], [4, "id3"], [6, "credits"]], "PAMI.AssociationRules.basic.ARWithLeverage module": [[2, "module-PAMI.AssociationRules.basic.ARWithLeverage"]], "PAMI.AssociationRules.basic.ARWithLift module": [[2, "module-PAMI.AssociationRules.basic.ARWithLift"]], "PAMI.AssociationRules.basic.RuleMiner module": [[2, "module-PAMI.AssociationRules.basic.RuleMiner"]], "PAMI.AssociationRules.basic.abstract module": [[2, "module-PAMI.AssociationRules.basic.abstract"]], "PAMI.correlatedPattern package": [[3, "pami-correlatedpattern-package"]], "PAMI.correlatedPattern.basic package": [[4, "pami-correlatedpattern-basic-package"]], "PAMI.correlatedPattern.basic.CoMine module": [[4, "module-PAMI.correlatedPattern.basic.CoMine"]], "PAMI.correlatedPattern.basic.CoMinePlus module": [[4, "module-PAMI.correlatedPattern.basic.CoMinePlus"]], "PAMI.correlatedPattern.basic.abstract module": [[4, "module-PAMI.correlatedPattern.basic.abstract"]], "PAMI.coveragePattern package": [[5, "pami-coveragepattern-package"]], "PAMI.coveragePattern.basic package": [[6, 
"pami-coveragepattern-basic-package"]], "PAMI.coveragePattern.basic.CMine module": [[6, "module-PAMI.coveragePattern.basic.CMine"]], "PAMI.coveragePattern.basic.CPPG module": [[6, "module-PAMI.coveragePattern.basic.CPPG"]], "Methods to execute code on terminal": [[6, "methods-to-execute-code-on-terminal"], [20, "methods-to-execute-code-on-terminal"], [20, "id1"], [23, "methods-to-execute-code-on-terminal"], [25, "methods-to-execute-code-on-terminal"], [25, "id1"], [25, "id4"], [25, "id7"], [25, "id10"], [26, "methods-to-execute-code-on-terminal"], [28, "methods-to-execute-code-on-terminal"], [30, "methods-to-execute-code-on-terminal"], [77, "methods-to-execute-code-on-terminal"], [77, "id2"], [77, "id4"], [77, "id7"], [86, "methods-to-execute-code-on-terminal"], [88, "methods-to-execute-code-on-terminal"], [90, "methods-to-execute-code-on-terminal"], [93, "methods-to-execute-code-on-terminal"], [93, "id2"], [96, "methods-to-execute-code-on-terminal"], [97, "methods-to-execute-code-on-terminal"], [103, "methods-to-execute-code-on-terminal"], [109, "methods-to-execute-code-on-terminal"], [111, "methods-to-execute-code-on-terminal"], [113, "methods-to-execute-code-on-terminal"], [115, "methods-to-execute-code-on-terminal"]], "Importing this algorithm into a python program": [[6, "importing-this-algorithm-into-a-python-program"], [7, "importing-this-algorithm-into-a-python-program"], [7, "id1"], [7, "id2"], [7, "id3"], [7, "id4"], [7, "id5"], [8, "importing-this-algorithm-into-a-python-program"], [8, "id1"], [8, "id2"], [8, "id3"], [8, "id4"], [8, "id5"], [8, "id6"], [9, "importing-this-algorithm-into-a-python-program"], [9, "id1"], [10, "importing-this-algorithm-into-a-python-program"], [10, "id1"], [10, "id2"], [10, "id3"], [10, "id4"], [10, "id5"], [10, "id6"], [11, "importing-this-algorithm-into-a-python-program"], [11, "id1"], [12, "importing-this-algorithm-into-a-python-program"], [12, "id1"], [13, "importing-this-algorithm-into-a-python-program"], [13, "id1"], [13, "id2"], [13, "id3"], [15, "importing-this-algorithm-into-a-python-program"], [17, "importing-this-algorithm-into-a-python-program"], [17, "id1"], [17, "id2"], [19, "importing-this-algorithm-into-a-python-program"], [19, "id1"], [19, "id2"], [19, "id3"], [20, "importing-this-algorithm-into-a-python-program"], [20, "id2"], [23, "importing-this-algorithm-into-a-python-program"], [25, "importing-this-algorithm-into-a-python-program"], [25, "id2"], [25, "id5"], [25, "id8"], [25, "id11"], [26, "importing-this-algorithm-into-a-python-program"], [28, "importing-this-algorithm-into-a-python-program"], [30, "importing-this-algorithm-into-a-python-program"], [77, "importing-this-algorithm-into-a-python-program"], [77, "id3"], [77, "id5"], [77, "id8"], [86, "importing-this-algorithm-into-a-python-program"], [88, "importing-this-algorithm-into-a-python-program"], [90, "importing-this-algorithm-into-a-python-program"], [93, "importing-this-algorithm-into-a-python-program"], [93, "id3"], [96, "importing-this-algorithm-into-a-python-program"], [97, "importing-this-algorithm-into-a-python-program"], [103, "importing-this-algorithm-into-a-python-program"], [107, "importing-this-algorithm-into-a-python-program"], [107, "id2"], [109, "importing-this-algorithm-into-a-python-program"], [111, "importing-this-algorithm-into-a-python-program"], [113, "importing-this-algorithm-into-a-python-program"], [115, "importing-this-algorithm-into-a-python-program"]], "Credits:": [[6, "id1"], [10, "credits"], [19, "credits"], [20, "credits"], [20, "id3"], [20, 
"id4"], [20, "id5"], [20, "id6"], [20, "id7"], [20, "id8"], [20, "id9"], [23, "credits"], [23, "id1"], [25, "credits"], [25, "id3"], [25, "id6"], [25, "id9"], [25, "id12"], [26, "credits"], [28, "credits"], [30, "credits"], [32, "credits"], [34, "credits"], [34, "id3"], [36, "credits"], [36, "id3"], [38, "credits"], [38, "id3"], [40, "credits"], [42, "credits"], [42, "id3"], [44, "credits"], [46, "credits"], [49, "credits"], [51, "credits"], [53, "credits"], [55, "credits"], [55, "id3"], [55, "id6"], [59, "credits"], [59, "id3"], [60, "credits"], [62, "credits"], [62, "id3"], [62, "id6"], [64, "credits"], [64, "id3"], [68, "credits"], [68, "id2"], [69, "credits"], [72, "credits"], [73, "credits"], [75, "credits"], [77, "credits"], [77, "id1"], [77, "id6"], [77, "id9"], [78, "credits"], [80, "credits"], [84, "credits"], [86, "credits"], [88, "credits"], [90, "credits"], [93, "credits"], [93, "id1"], [93, "id4"], [96, "credits"], [97, "credits"], [101, "credits"], [103, "credits"], [105, "credits"], [107, "credits"], [107, "id3"], [109, "credits"], [111, "credits"], [113, "credits"]], "PAMI.coveragePattern.basic.abstract module": [[6, "module-PAMI.coveragePattern.basic.abstract"]], "PAMI.extras package": [[7, "pami-extras-package"]], "PAMI.extras.convertMultiTSIntoFuzzy module": [[7, "pami-extras-convertmultitsintofuzzy-module"]], "PAMI.extras.generateLatexGraphFile module": [[7, "module-PAMI.extras.generateLatexGraphFile"]], "PAMI.extras.plotPointOnMap module": [[7, "module-PAMI.extras.plotPointOnMap"]], "PAMI.extras.plotPointOnMap_dump module": [[7, "module-PAMI.extras.plotPointOnMap_dump"]], "PAMI.extras.scatterPlotSpatialPoints module": [[7, "module-PAMI.extras.scatterPlotSpatialPoints"]], "PAMI.extras.topKPatterns module": [[7, "module-PAMI.extras.topKPatterns"]], "PAMI.extras.uncertaindb_convert module": [[7, "module-PAMI.extras.uncertaindb_convert"]], "PAMI.extras.DF2DB package": [[8, "pami-extras-df2db-package"]], "PAMI.extras.DF2DB.DF2DB module": [[8, "module-PAMI.extras.DF2DB.DF2DB"]], "PAMI.extras.DF2DB.DF2DBPlus module": [[8, "pami-extras-df2db-df2dbplus-module"]], "PAMI.extras.DF2DB.DenseFormatDF module": [[8, "module-PAMI.extras.DF2DB.DenseFormatDF"]], "PAMI.extras.DF2DB.SparseFormatDF module": [[8, "module-PAMI.extras.DF2DB.SparseFormatDF"]], "PAMI.extras.DF2DB.createTDB module": [[8, "module-PAMI.extras.DF2DB.createTDB"]], "PAMI.extras.DF2DB.denseDF2DBPlus module": [[8, "module-PAMI.extras.DF2DB.denseDF2DBPlus"]], "PAMI.extras.DF2DB.denseDF2DB_dump module": [[8, "module-PAMI.extras.DF2DB.denseDF2DB_dump"]], "PAMI.extras.DF2DB.sparseDF2DBPlus module": [[8, "module-PAMI.extras.DF2DB.sparseDF2DBPlus"]], "PAMI.extras.calculateMISValues package": [[9, "pami-extras-calculatemisvalues-package"]], "PAMI.extras.calculateMISValues.usingBeta module": [[9, "module-PAMI.extras.calculateMISValues.usingBeta"]], "PAMI.extras.calculateMISValues.usingSD module": [[9, "module-PAMI.extras.calculateMISValues.usingSD"]], "PAMI.extras.dbStats package": [[10, "pami-extras-dbstats-package"]], "PAMI.extras.dbStats.FuzzyDatabase module": [[10, "module-PAMI.extras.dbStats.FuzzyDatabase"]], "PAMI.extras.dbStats.MultipleTimeSeriesFuzzyDatabaseStats module": [[10, "module-PAMI.extras.dbStats.MultipleTimeSeriesFuzzyDatabaseStats"]], "PAMI.extras.dbStats.SequentialDatabase module": [[10, "module-PAMI.extras.dbStats.SequentialDatabase"]], "Executing the code on terminal:": [[10, "executing-the-code-on-terminal"], [19, "executing-the-code-on-terminal"], [23, "executing-the-code-on-terminal"], [55, 
"executing-the-code-on-terminal"], [55, "id1"], [55, "id4"], [59, "executing-the-code-on-terminal"], [59, "id1"], [60, "executing-the-code-on-terminal"], [62, "executing-the-code-on-terminal"], [62, "id1"], [62, "id4"], [64, "executing-the-code-on-terminal"], [64, "id1"], [68, "executing-the-code-on-terminal"], [68, "id1"], [69, "executing-the-code-on-terminal"], [72, "executing-the-code-on-terminal"], [75, "executing-the-code-on-terminal"], [80, "executing-the-code-on-terminal"], [84, "executing-the-code-on-terminal"], [93, "executing-the-code-on-terminal"], [101, "executing-the-code-on-terminal"], [105, "executing-the-code-on-terminal"], [107, "executing-the-code-on-terminal"], [107, "id1"]], "Sample run of the importing code:": [[10, "sample-run-of-the-importing-code"], [19, "sample-run-of-the-importing-code"], [23, "sample-run-of-the-importing-code"], [64, "sample-run-of-the-importing-code"], [64, "id2"], [68, "sample-run-of-the-importing-code"], [72, "sample-run-of-the-importing-code"], [93, "sample-run-of-the-importing-code"], [101, "sample-run-of-the-importing-code"]], "PAMI.extras.dbStats.TemporalDatabase module": [[10, "module-PAMI.extras.dbStats.TemporalDatabase"]], "PAMI.extras.dbStats.TransactionalDatabase module": [[10, "module-PAMI.extras.dbStats.TransactionalDatabase"]], "PAMI.extras.dbStats.UncertainTemporalDatabase module": [[10, "module-PAMI.extras.dbStats.UncertainTemporalDatabase"]], "PAMI.extras.dbStats.UncertainTransactionalDatabase module": [[10, "module-PAMI.extras.dbStats.UncertainTransactionalDatabase"]], "PAMI.extras.dbStats.UtilityDatabase module": [[10, "module-PAMI.extras.dbStats.UtilityDatabase"]], "PAMI.extras.fuzzyTransformation package": [[11, "pami-extras-fuzzytransformation-package"]], "PAMI.extras.fuzzyTransformation.abstract module": [[11, "module-PAMI.extras.fuzzyTransformation.abstract"]], "PAMI.extras.fuzzyTransformation.temporalToFuzzy module": [[11, "module-PAMI.extras.fuzzyTransformation.temporalToFuzzy"]], "PAMI.extras.fuzzyTransformation.transactionalToFuzzy module": [[11, "module-PAMI.extras.fuzzyTransformation.transactionalToFuzzy"]], "PAMI.extras.fuzzyTransformation.utilityToFuzzy module": [[11, "pami-extras-fuzzytransformation-utilitytofuzzy-module"]], "PAMI.extras.generateDatabase package": [[12, "pami-extras-generatedatabase-package"]], "PAMI.extras.generateDatabase.generateSpatioTemporalDatabase module": [[12, "module-PAMI.extras.generateDatabase.generateSpatioTemporalDatabase"]], "PAMI.extras.generateDatabase.generateTemporalDatabase module": [[12, "module-PAMI.extras.generateDatabase.generateTemporalDatabase"]], "PAMI.extras.generateDatabase.generateTransactionalDatabase module": [[12, "module-PAMI.extras.generateDatabase.generateTransactionalDatabase"]], "PAMI.extras.graph package": [[13, "pami-extras-graph-package"]], "PAMI.extras.graph.DF2Fig module": [[13, "module-PAMI.extras.graph.DF2Fig"]], "PAMI.extras.graph.DF2Tex module": [[13, "pami-extras-graph-df2tex-module"]], "PAMI.extras.graph.plotLineGraphFromDictionary module": [[13, "module-PAMI.extras.graph.plotLineGraphFromDictionary"]], "PAMI.extras.graph.plotLineGraphsFromDataFrame module": [[13, "module-PAMI.extras.graph.plotLineGraphsFromDataFrame"]], "PAMI.extras.graph.visualizeFuzzyPatterns module": [[13, "module-PAMI.extras.graph.visualizeFuzzyPatterns"]], "PAMI.extras.graph.visualizePatterns module": [[13, "module-PAMI.extras.graph.visualizePatterns"]], "PAMI.extras.image2Database package": [[14, "pami-extras-image2database-package"]], "PAMI.extras.imageProcessing package": 
[[15, "pami-extras-imageprocessing-package"]], "PAMI.extras.imageProcessing.imagery2Databases module": [[15, "module-PAMI.extras.imageProcessing.imagery2Databases"]], "PAMI.extras.messaging package": [[16, "pami-extras-messaging-package"]], "PAMI.extras.messaging.discord module": [[16, "module-PAMI.extras.messaging.discord"]], "PAMI.extras.messaging.gmail module": [[16, "module-PAMI.extras.messaging.gmail"]], "PAMI.extras.neighbours package": [[17, "pami-extras-neighbours-package"]], "PAMI.extras.neighbours.findNeighborsUsingEuclideanDistanceforPointInfo module": [[17, "module-PAMI.extras.neighbours.findNeighborsUsingEuclideanDistanceforPointInfo"]], "PAMI.extras.neighbours.findNeighboursUsingEuclidean module": [[17, "module-PAMI.extras.neighbours.findNeighboursUsingEuclidean"]], "PAMI.extras.neighbours.findNeighboursUsingGeodesic module": [[17, "module-PAMI.extras.neighbours.findNeighboursUsingGeodesic"]], "PAMI.extras.sampleDatasets package": [[18, "pami-extras-sampledatasets-package"]], "PAMI.extras.stats package": [[19, "pami-extras-stats-package"]], "PAMI.extras.stats.TransactionalDatabase module": [[19, "module-PAMI.extras.stats.TransactionalDatabase"]], "PAMI.extras.stats.graphDatabase module": [[19, "module-PAMI.extras.stats.graphDatabase"]], "PAMI.extras.stats.sequentialDatabase module": [[19, "module-PAMI.extras.stats.sequentialDatabase"]], "PAMI.extras.stats.temporalDatabase module": [[19, "module-PAMI.extras.stats.temporalDatabase"]], "PAMI.extras.stats.utilityDatabase module": [[19, "module-PAMI.extras.stats.utilityDatabase"]], "PAMI.extras.syntheticDataGenerator package": [[20, "pami-extras-syntheticdatagenerator-package"]], "PAMI.extras.syntheticDataGenerator.TemporalDatabase module": [[20, "module-PAMI.extras.syntheticDataGenerator.TemporalDatabase"]], "PAMI.extras.syntheticDataGenerator.TransactionalDatabase module": [[20, "module-PAMI.extras.syntheticDataGenerator.TransactionalDatabase"]], "PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialTemporal module": [[20, "module-PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialTemporal"]], "PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialTransactions module": [[20, "module-PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialTransactions"]], "PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialUncertainTransaction module": [[20, "module-PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialUncertainTransaction"]], "PAMI.extras.syntheticDataGenerator.createSyntheticTemporal module": [[20, "module-PAMI.extras.syntheticDataGenerator.createSyntheticTemporal"]], "PAMI.extras.syntheticDataGenerator.createSyntheticTransactions module": [[20, "module-PAMI.extras.syntheticDataGenerator.createSyntheticTransactions"]], "PAMI.extras.syntheticDataGenerator.createSyntheticUncertainTemporal module": [[20, "module-PAMI.extras.syntheticDataGenerator.createSyntheticUncertainTemporal"]], "PAMI.extras.syntheticDataGenerator.createSyntheticUncertainTransactions module": [[20, "module-PAMI.extras.syntheticDataGenerator.createSyntheticUncertainTransactions"]], "PAMI.extras.syntheticDataGenerator.createSyntheticUtility module": [[20, "module-PAMI.extras.syntheticDataGenerator.createSyntheticUtility"]], "PAMI.extras.syntheticDataGenerator.fuzzyDatabase module": [[20, "module-PAMI.extras.syntheticDataGenerator.fuzzyDatabase"]], "PAMI.extras.syntheticDataGenerator.generateTemporal module": [[20, "module-PAMI.extras.syntheticDataGenerator.generateTemporal"]], 
"PAMI.extras.syntheticDataGenerator.generateTransactional module": [[20, "module-PAMI.extras.syntheticDataGenerator.generateTransactional"]], "PAMI.extras.syntheticDataGenerator.generateUncertainTemporal module": [[20, "module-PAMI.extras.syntheticDataGenerator.generateUncertainTemporal"]], "PAMI.extras.syntheticDataGenerator.generateUncertainTransactional module": [[20, "module-PAMI.extras.syntheticDataGenerator.generateUncertainTransactional"]], "PAMI.extras.syntheticDataGenerator.generateUtilityTemporal module": [[20, "module-PAMI.extras.syntheticDataGenerator.generateUtilityTemporal"]], "PAMI.extras.syntheticDataGenerator.generateUtilityTransactional module": [[20, "module-PAMI.extras.syntheticDataGenerator.generateUtilityTransactional"]], "PAMI.extras.syntheticDataGenerator.georeferencedTemporalDatabase module": [[20, "module-PAMI.extras.syntheticDataGenerator.georeferencedTemporalDatabase"]], "PAMI.extras.syntheticDataGenerator.georeferencedTransactionalDatabase module": [[20, "module-PAMI.extras.syntheticDataGenerator.georeferencedTransactionalDatabase"]], "PAMI.extras.syntheticDataGenerator.syntheticUtilityDatabase module": [[20, "module-PAMI.extras.syntheticDataGenerator.syntheticUtilityDatabase"]], "PAMI.extras.syntheticDataGenerator.temporalDatabaseGen module": [[20, "module-PAMI.extras.syntheticDataGenerator.temporalDatabaseGen"]], "PAMI.extras.syntheticDataGenerator.utilityDatabase module": [[20, "module-PAMI.extras.syntheticDataGenerator.utilityDatabase"]], "PAMI.extras.visualize package": [[21, "pami-extras-visualize-package"]], "PAMI.extras.visualize.graphs module": [[21, "module-PAMI.extras.visualize.graphs"]], "PAMI.faultTolerantFrequentPattern package": [[22, "pami-faulttolerantfrequentpattern-package"]], "PAMI.faultTolerantFrequentPattern.basic package": [[23, "pami-faulttolerantfrequentpattern-basic-package"]], "PAMI.faultTolerantFrequentPattern.basic.FTApriori module": [[23, "module-PAMI.faultTolerantFrequentPattern.basic.FTApriori"]], "PAMI.faultTolerantFrequentPattern.basic.FTFPGrowth module": [[23, "module-PAMI.faultTolerantFrequentPattern.basic.FTFPGrowth"]], "PAMI.faultTolerantFrequentPattern.basic.abstract module": [[23, "module-PAMI.faultTolerantFrequentPattern.basic.abstract"]], "PAMI.frequentPattern package": [[24, "pami-frequentpattern-package"]], "PAMI.frequentPattern.basic package": [[25, "pami-frequentpattern-basic-package"]], "PAMI.frequentPattern.basic.Apriori module": [[25, "module-PAMI.frequentPattern.basic.Apriori"]], "PAMI.frequentPattern.basic.ECLAT module": [[25, "module-PAMI.frequentPattern.basic.ECLAT"]], "PAMI.frequentPattern.basic.ECLATDiffset module": [[25, "module-PAMI.frequentPattern.basic.ECLATDiffset"]], "PAMI.frequentPattern.basic.ECLATbitset module": [[25, "module-PAMI.frequentPattern.basic.ECLATbitset"]], "PAMI.frequentPattern.basic.FPGrowth module": [[25, "module-PAMI.frequentPattern.basic.FPGrowth"]], "PAMI.frequentPattern.basic.abstract module": [[25, "module-PAMI.frequentPattern.basic.abstract"]], "PAMI.frequentPattern.closed package": [[26, "pami-frequentpattern-closed-package"]], "PAMI.frequentPattern.closed.CHARM module": [[26, "module-PAMI.frequentPattern.closed.CHARM"]], "PAMI.frequentPattern.closed.abstract module": [[26, "module-PAMI.frequentPattern.closed.abstract"]], "PAMI.frequentPattern.cuda package": [[27, "pami-frequentpattern-cuda-package"]], "PAMI.frequentPattern.cuda.abstract module": [[27, "pami-frequentpattern-cuda-abstract-module"]], "PAMI.frequentPattern.cuda.cuApriori module": [[27, 
"pami-frequentpattern-cuda-cuapriori-module"]], "PAMI.frequentPattern.cuda.cuAprioriBit module": [[27, "pami-frequentpattern-cuda-cuaprioribit-module"]], "PAMI.frequentPattern.cuda.cuEclat module": [[27, "pami-frequentpattern-cuda-cueclat-module"]], "PAMI.frequentPattern.cuda.cuEclatBit module": [[27, "pami-frequentpattern-cuda-cueclatbit-module"]], "PAMI.frequentPattern.cuda.cudaAprioriGCT module": [[27, "pami-frequentpattern-cuda-cudaapriorigct-module"]], "PAMI.frequentPattern.cuda.cudaAprioriTID module": [[27, "pami-frequentpattern-cuda-cudaaprioritid-module"]], "PAMI.frequentPattern.cuda.cudaEclatGCT module": [[27, "pami-frequentpattern-cuda-cudaeclatgct-module"]], "PAMI.frequentPattern.maximal package": [[28, "pami-frequentpattern-maximal-package"]], "PAMI.frequentPattern.maximal.MaxFPGrowth module": [[28, "module-PAMI.frequentPattern.maximal.MaxFPGrowth"]], "PAMI.frequentPattern.maximal.abstract module": [[28, "module-PAMI.frequentPattern.maximal.abstract"]], "PAMI.frequentPattern.pyspark package": [[29, "pami-frequentpattern-pyspark-package"]], "PAMI.frequentPattern.pyspark.abstract module": [[29, "pami-frequentpattern-pyspark-abstract-module"]], "PAMI.frequentPattern.pyspark.parallelApriori module": [[29, "pami-frequentpattern-pyspark-parallelapriori-module"]], "PAMI.frequentPattern.pyspark.parallelECLAT module": [[29, "pami-frequentpattern-pyspark-paralleleclat-module"]], "PAMI.frequentPattern.pyspark.parallelFPGrowth module": [[29, "pami-frequentpattern-pyspark-parallelfpgrowth-module"]], "PAMI.frequentPattern.topk package": [[30, "pami-frequentpattern-topk-package"]], "PAMI.frequentPattern.topk.FAE module": [[30, "module-PAMI.frequentPattern.topk.FAE"]], "PAMI.frequentPattern.topk.abstract module": [[30, "module-PAMI.frequentPattern.topk.abstract"]], "PAMI.fuzzyCorrelatedPattern package": [[31, "pami-fuzzycorrelatedpattern-package"]], "PAMI.fuzzyCorrelatedPattern.basic package": [[32, "pami-fuzzycorrelatedpattern-basic-package"]], "PAMI.fuzzyCorrelatedPattern.basic.FCPGrowth module": [[32, "module-PAMI.fuzzyCorrelatedPattern.basic.FCPGrowth"]], "Executing the code on terminal :": [[32, "executing-the-code-on-terminal"], [34, "executing-the-code-on-terminal"], [34, "id1"], [36, "executing-the-code-on-terminal"], [36, "id1"], [38, "executing-the-code-on-terminal"], [38, "id1"], [40, "executing-the-code-on-terminal"], [42, "executing-the-code-on-terminal"], [42, "id1"], [44, "executing-the-code-on-terminal"], [46, "executing-the-code-on-terminal"], [49, "executing-the-code-on-terminal"], [53, "executing-the-code-on-terminal"]], "Sample run of importing the code:": [[32, "sample-run-of-importing-the-code"], [34, "sample-run-of-importing-the-code"], [34, "id2"], [36, "sample-run-of-importing-the-code"], [36, "id2"], [38, "sample-run-of-importing-the-code"], [38, "id2"], [40, "sample-run-of-importing-the-code"], [42, "sample-run-of-importing-the-code"], [42, "id2"], [53, "sample-run-of-importing-the-code"], [55, "sample-run-of-importing-the-code"], [55, "id2"], [55, "id5"], [59, "sample-run-of-importing-the-code"], [59, "id2"], [60, "sample-run-of-importing-the-code"], [62, "sample-run-of-importing-the-code"], [62, "id2"], [62, "id5"], [68, "sample-run-of-importing-the-code"], [73, "sample-run-of-importing-the-code"], [75, "sample-run-of-importing-the-code"], [105, "sample-run-of-importing-the-code"]], "PAMI.fuzzyCorrelatedPattern.basic.abstract module": [[32, "module-PAMI.fuzzyCorrelatedPattern.basic.abstract"]], "PAMI.fuzzyFrequentPattern package": [[33, 
"pami-fuzzyfrequentpattern-package"]], "PAMI.fuzzyFrequentPattern.basic package": [[34, "pami-fuzzyfrequentpattern-basic-package"]], "PAMI.fuzzyFrequentPattern.basic.FFIMiner module": [[34, "module-PAMI.fuzzyFrequentPattern.basic.FFIMiner"]], "PAMI.fuzzyFrequentPattern.basic.FFIMiner_old module": [[34, "module-PAMI.fuzzyFrequentPattern.basic.FFIMiner_old"]], "PAMI.fuzzyFrequentPattern.basic.abstract module": [[34, "module-PAMI.fuzzyFrequentPattern.basic.abstract"]], "PAMI.fuzzyGeoreferencedFrequentPattern package": [[35, "pami-fuzzygeoreferencedfrequentpattern-package"]], "PAMI.fuzzyGeoreferencedFrequentPattern.basic package": [[36, "pami-fuzzygeoreferencedfrequentpattern-basic-package"]], "PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner module": [[36, "module-PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner"]], "PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner_old module": [[36, "module-PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner_old"]], "PAMI.fuzzyGeoreferencedFrequentPattern.basic.abstract module": [[36, "module-PAMI.fuzzyGeoreferencedFrequentPattern.basic.abstract"]], "PAMI.fuzzyGeoreferencedPeriodicFrequentPattern package": [[37, "pami-fuzzygeoreferencedperiodicfrequentpattern-package"]], "PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic package": [[38, "pami-fuzzygeoreferencedperiodicfrequentpattern-basic-package"]], "PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner module": [[38, "module-PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner"]], "PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner_old module": [[38, "module-PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner_old"]], "PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.abstract module": [[38, "module-PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.abstract"]], "PAMI.fuzzyPartialPeriodicPatterns package": [[39, "pami-fuzzypartialperiodicpatterns-package"]], "PAMI.fuzzyPartialPeriodicPatterns.basic package": [[40, "pami-fuzzypartialperiodicpatterns-basic-package"]], "PAMI.fuzzyPartialPeriodicPatterns.basic.F3PMiner module": [[40, "module-PAMI.fuzzyPartialPeriodicPatterns.basic.F3PMiner"]], "PAMI.fuzzyPartialPeriodicPatterns.basic.abstract module": [[40, "module-PAMI.fuzzyPartialPeriodicPatterns.basic.abstract"]], "PAMI.fuzzyPeriodicFrequentPattern package": [[41, "pami-fuzzyperiodicfrequentpattern-package"]], "PAMI.fuzzyPeriodicFrequentPattern.basic package": [[42, "pami-fuzzyperiodicfrequentpattern-basic-package"]], "PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner module": [[42, "module-PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner"]], "PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner_old module": [[42, "module-PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner_old"]], "PAMI.fuzzyPeriodicFrequentPattern.basic.abstract module": [[42, "module-PAMI.fuzzyPeriodicFrequentPattern.basic.abstract"]], "PAMI.geoReferencedPeriodicFrequentPattern package": [[43, "pami-georeferencedperiodicfrequentpattern-package"]], "PAMI.geoReferencedPeriodicFrequentPattern.basic package": [[44, "pami-georeferencedperiodicfrequentpattern-basic-package"]], "PAMI.geoReferencedPeriodicFrequentPattern.basic.GPFPMiner module": [[44, "module-PAMI.geoReferencedPeriodicFrequentPattern.basic.GPFPMiner"]], "Sample run of importing the code :": [[44, "sample-run-of-importing-the-code"], [46, "sample-run-of-importing-the-code"], [49, "sample-run-of-importing-the-code"]], "PAMI.geoReferencedPeriodicFrequentPattern.basic.abstract module": [[44, 
"module-PAMI.geoReferencedPeriodicFrequentPattern.basic.abstract"]], "PAMI.georeferencedFrequentPattern package": [[45, "pami-georeferencedfrequentpattern-package"]], "PAMI.georeferencedFrequentPattern.basic package": [[46, "pami-georeferencedfrequentpattern-basic-package"]], "PAMI.georeferencedFrequentPattern.basic.FSPGrowth module": [[46, "pami-georeferencedfrequentpattern-basic-fspgrowth-module"]], "PAMI.georeferencedFrequentPattern.basic.SpatialECLAT module": [[46, "module-PAMI.georeferencedFrequentPattern.basic.SpatialECLAT"]], "PAMI.georeferencedFrequentPattern.basic.abstract module": [[46, "module-PAMI.georeferencedFrequentPattern.basic.abstract"]], "PAMI.georeferencedFrequentSequencePattern package": [[47, "pami-georeferencedfrequentsequencepattern-package"]], "PAMI.georeferencedFrequentSequencePattern.abstract module": [[47, "module-PAMI.georeferencedFrequentSequencePattern.abstract"]], "PAMI.georeferencedPartialPeriodicPattern package": [[48, "pami-georeferencedpartialperiodicpattern-package"]], "PAMI.georeferencedPartialPeriodicPattern.basic package": [[49, "pami-georeferencedpartialperiodicpattern-basic-package"]], "PAMI.georeferencedPartialPeriodicPattern.basic.STEclat module": [[49, "module-PAMI.georeferencedPartialPeriodicPattern.basic.STEclat"]], "PAMI.georeferencedPartialPeriodicPattern.basic.abstract module": [[49, "module-PAMI.georeferencedPartialPeriodicPattern.basic.abstract"]], "PAMI.highUtilityFrequentPattern package": [[50, "pami-highutilityfrequentpattern-package"]], "PAMI.highUtilityFrequentPattern.basic package": [[51, "pami-highutilityfrequentpattern-basic-package"]], "PAMI.highUtilityFrequentPattern.basic.HUFIM module": [[51, "module-PAMI.highUtilityFrequentPattern.basic.HUFIM"]], "Executing the code on terminal": [[51, "executing-the-code-on-terminal"]], "Sample run of importing the code": [[51, "sample-run-of-importing-the-code"]], "PAMI.highUtilityFrequentPattern.basic.abstract module": [[51, "module-PAMI.highUtilityFrequentPattern.basic.abstract"]], "PAMI.highUtilityGeoreferencedFrequentPattern package": [[52, "pami-highutilitygeoreferencedfrequentpattern-package"]], "PAMI.highUtilityGeoreferencedFrequentPattern.basic package": [[53, "pami-highutilitygeoreferencedfrequentpattern-basic-package"]], "PAMI.highUtilityGeoreferencedFrequentPattern.basic.SHUFIM module": [[53, "module-PAMI.highUtilityGeoreferencedFrequentPattern.basic.SHUFIM"]], "PAMI.highUtilityGeoreferencedFrequentPattern.basic.abstract module": [[53, "module-PAMI.highUtilityGeoreferencedFrequentPattern.basic.abstract"]], "PAMI.highUtilityPattern package": [[54, "pami-highutilitypattern-package"]], "PAMI.highUtilityPattern.basic package": [[55, "pami-highutilitypattern-basic-package"]], "PAMI.highUtilityPattern.basic.EFIM module": [[55, "module-PAMI.highUtilityPattern.basic.EFIM"]], "PAMI.highUtilityPattern.basic.HMiner module": [[55, "module-PAMI.highUtilityPattern.basic.HMiner"]], "PAMI.highUtilityPattern.basic.UPGrowth module": [[55, "module-PAMI.highUtilityPattern.basic.UPGrowth"]], "PAMI.highUtilityPattern.basic.abstract module": [[55, "module-PAMI.highUtilityPattern.basic.abstract"]], "PAMI.highUtilityPattern.basic.efimParallel module": [[55, "pami-highutilitypattern-basic-efimparallel-module"]], "PAMI.highUtilityPattern.parallel package": [[56, "pami-highutilitypattern-parallel-package"]], "PAMI.highUtilityPattern.parallel.abstract module": [[56, "module-PAMI.highUtilityPattern.parallel.abstract"]], "PAMI.highUtilityPattern.parallel.efimparallel module": [[56, 
"pami-highutilitypattern-parallel-efimparallel-module"]], "PAMI.highUtilityPatternsInStreams package": [[57, "pami-highutilitypatternsinstreams-package"]], "PAMI.highUtilityPatternsInStreams.HUPMS module": [[57, "pami-highutilitypatternsinstreams-hupms-module"]], "PAMI.highUtilityPatternsInStreams.SHUGrowth module": [[57, "pami-highutilitypatternsinstreams-shugrowth-module"]], "PAMI.highUtilityPatternsInStreams.abstract module": [[57, "module-PAMI.highUtilityPatternsInStreams.abstract"]], "PAMI.highUtilitySpatialPattern package": [[58, "pami-highutilityspatialpattern-package"]], "PAMI.highUtilitySpatialPattern.abstract module": [[58, "module-PAMI.highUtilitySpatialPattern.abstract"]], "PAMI.highUtilitySpatialPattern.basic package": [[59, "pami-highutilityspatialpattern-basic-package"]], "PAMI.highUtilitySpatialPattern.basic.HDSHUIM module": [[59, "module-PAMI.highUtilitySpatialPattern.basic.HDSHUIM"]], "PAMI.highUtilitySpatialPattern.basic.SHUIM module": [[59, "module-PAMI.highUtilitySpatialPattern.basic.SHUIM"]], "PAMI.highUtilitySpatialPattern.basic.abstract module": [[59, "module-PAMI.highUtilitySpatialPattern.basic.abstract"]], "PAMI.highUtilitySpatialPattern.topk package": [[60, "pami-highutilityspatialpattern-topk-package"]], "PAMI.highUtilitySpatialPattern.topk.TKSHUIM module": [[60, "module-PAMI.highUtilitySpatialPattern.topk.TKSHUIM"]], "PAMI.highUtilitySpatialPattern.topk.abstract module": [[60, "module-PAMI.highUtilitySpatialPattern.topk.abstract"]], "PAMI.localPeriodicPattern package": [[61, "pami-localperiodicpattern-package"]], "PAMI.localPeriodicPattern.basic package": [[62, "pami-localperiodicpattern-basic-package"]], "PAMI.localPeriodicPattern.basic.LPPGrowth module": [[62, "module-PAMI.localPeriodicPattern.basic.LPPGrowth"]], "PAMI.localPeriodicPattern.basic.LPPMBreadth module": [[62, "module-PAMI.localPeriodicPattern.basic.LPPMBreadth"]], "PAMI.localPeriodicPattern.basic.LPPMDepth module": [[62, "module-PAMI.localPeriodicPattern.basic.LPPMDepth"]], "PAMI.localPeriodicPattern.basic.abstract module": [[62, "module-PAMI.localPeriodicPattern.basic.abstract"]], "PAMI.multipleMinimumSupportBasedFrequentPattern package": [[63, "pami-multipleminimumsupportbasedfrequentpattern-package"]], "PAMI.multipleMinimumSupportBasedFrequentPattern.basic package": [[64, "pami-multipleminimumsupportbasedfrequentpattern-basic-package"]], "PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowth module": [[64, "module-PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowth"]], "PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowthPlus module": [[64, "module-PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowthPlus"]], "PAMI.multipleMinimumSupportBasedFrequentPattern.basic.abstract module": [[64, "module-PAMI.multipleMinimumSupportBasedFrequentPattern.basic.abstract"]], "PAMI.partialPeriodicFrequentPattern package": [[65, "pami-partialperiodicfrequentpattern-package"]], "PAMI.partialPeriodicFrequentPattern.basic package": [[66, "pami-partialperiodicfrequentpattern-basic-package"]], "PAMI.partialPeriodicFrequentPattern.basic.GPFgrowth module": [[66, "pami-partialperiodicfrequentpattern-basic-gpfgrowth-module"]], "PAMI.partialPeriodicFrequentPattern.basic.PPF_DFS module": [[66, "pami-partialperiodicfrequentpattern-basic-ppf-dfs-module"]], "PAMI.partialPeriodicFrequentPattern.basic.abstract module": [[66, "module-PAMI.partialPeriodicFrequentPattern.basic.abstract"]], "PAMI.partialPeriodicPattern package": [[67, "pami-partialperiodicpattern-package"]], 
"PAMI.partialPeriodicPattern.basic package": [[68, "pami-partialperiodicpattern-basic-package"]], "PAMI.partialPeriodicPattern.basic.GThreePGrowth module": [[68, "pami-partialperiodicpattern-basic-gthreepgrowth-module"]], "PAMI.partialPeriodicPattern.basic.Gabstract module": [[68, "module-PAMI.partialPeriodicPattern.basic.Gabstract"]], "PAMI.partialPeriodicPattern.basic.PPPGrowth module": [[68, "module-PAMI.partialPeriodicPattern.basic.PPPGrowth"]], "PAMI.partialPeriodicPattern.basic.PPP_ECLAT module": [[68, "module-PAMI.partialPeriodicPattern.basic.PPP_ECLAT"]], "PAMI.partialPeriodicPattern.basic.abstract module": [[68, "module-PAMI.partialPeriodicPattern.basic.abstract"]], "PAMI.partialPeriodicPattern.closed package": [[69, "pami-partialperiodicpattern-closed-package"]], "PAMI.partialPeriodicPattern.closed.PPPClose module": [[69, "module-PAMI.partialPeriodicPattern.closed.PPPClose"]], "Sample run of the imported code:": [[69, "sample-run-of-the-imported-code"], [80, "sample-run-of-the-imported-code"]], "PAMI.partialPeriodicPattern.closed.abstract module": [[69, "module-PAMI.partialPeriodicPattern.closed.abstract"]], "PAMI.partialPeriodicPattern.maximal package": [[70, "pami-partialperiodicpattern-maximal-package"]], "PAMI.partialPeriodicPattern.maximal.Max3PGrowth module": [[70, "pami-partialperiodicpattern-maximal-max3pgrowth-module"]], "PAMI.partialPeriodicPattern.maximal.abstract module": [[70, "module-PAMI.partialPeriodicPattern.maximal.abstract"]], "PAMI.partialPeriodicPattern.pyspark package": [[71, "pami-partialperiodicpattern-pyspark-package"]], "PAMI.partialPeriodicPattern.pyspark.abstract module": [[71, "module-PAMI.partialPeriodicPattern.pyspark.abstract"]], "PAMI.partialPeriodicPattern.pyspark.parallel3PGrowth module": [[71, "pami-partialperiodicpattern-pyspark-parallel3pgrowth-module"]], "PAMI.partialPeriodicPattern.topk package": [[72, "pami-partialperiodicpattern-topk-package"]], "PAMI.partialPeriodicPattern.topk.abstract module": [[72, "module-PAMI.partialPeriodicPattern.topk.abstract"]], "PAMI.partialPeriodicPattern.topk.k3PMiner module": [[72, "module-PAMI.partialPeriodicPattern.topk.k3PMiner"]], "PAMI.partialPeriodicPatternInMultipleTimeSeries package": [[73, "pami-partialperiodicpatterninmultipletimeseries-package"]], "PAMI.partialPeriodicPatternInMultipleTimeSeries.PPGrowth module": [[73, "module-PAMI.partialPeriodicPatternInMultipleTimeSeries.PPGrowth"]], "PAMI.partialPeriodicPatternInMultipleTimeSeries.abstract module": [[73, "module-PAMI.partialPeriodicPatternInMultipleTimeSeries.abstract"]], "PAMI.periodicCorrelatedPattern package": [[74, "pami-periodiccorrelatedpattern-package"]], "PAMI.periodicCorrelatedPattern.basic package": [[75, "pami-periodiccorrelatedpattern-basic-package"]], "PAMI.periodicCorrelatedPattern.basic.EPCPGrowth module": [[75, "module-PAMI.periodicCorrelatedPattern.basic.EPCPGrowth"]], "PAMI.periodicCorrelatedPattern.basic.abstract module": [[75, "module-PAMI.periodicCorrelatedPattern.basic.abstract"]], "PAMI.periodicFrequentPattern package": [[76, "pami-periodicfrequentpattern-package"]], "PAMI.periodicFrequentPattern.basic package": [[77, "pami-periodicfrequentpattern-basic-package"]], "PAMI.periodicFrequentPattern.basic.PFECLAT module": [[77, "module-PAMI.periodicFrequentPattern.basic.PFECLAT"]], "PAMI.periodicFrequentPattern.basic.PFPGrowth module": [[77, "module-PAMI.periodicFrequentPattern.basic.PFPGrowth"]], "PAMI.periodicFrequentPattern.basic.PFPGrowthPlus module": [[77, "module-PAMI.periodicFrequentPattern.basic.PFPGrowthPlus"]], 
"PAMI.periodicFrequentPattern.basic.PFPMC module": [[77, "module-PAMI.periodicFrequentPattern.basic.PFPMC"]], "PAMI.periodicFrequentPattern.basic.PSGrowth module": [[77, "module-PAMI.periodicFrequentPattern.basic.PSGrowth"]], "PAMI.periodicFrequentPattern.basic.abstract module": [[77, "module-PAMI.periodicFrequentPattern.basic.abstract"]], "PAMI.periodicFrequentPattern.basic.parallelPFPGrowth module": [[77, "pami-periodicfrequentpattern-basic-parallelpfpgrowth-module"]], "PAMI.periodicFrequentPattern.closed package": [[78, "pami-periodicfrequentpattern-closed-package"]], "PAMI.periodicFrequentPattern.closed.CPFPMiner module": [[78, "module-PAMI.periodicFrequentPattern.closed.CPFPMiner"]], "PAMI.periodicFrequentPattern.closed.abstract module": [[78, "module-PAMI.periodicFrequentPattern.closed.abstract"]], "PAMI.periodicFrequentPattern.cuda package": [[79, "pami-periodicfrequentpattern-cuda-package"]], "PAMI.periodicFrequentPattern.cuda.abstract module": [[79, "pami-periodicfrequentpattern-cuda-abstract-module"]], "PAMI.periodicFrequentPattern.cuda.cuGPFMiner module": [[79, "pami-periodicfrequentpattern-cuda-cugpfminer-module"]], "PAMI.periodicFrequentPattern.cuda.gPFMinerBit module": [[79, "pami-periodicfrequentpattern-cuda-gpfminerbit-module"]], "PAMI.periodicFrequentPattern.maximal package": [[80, "pami-periodicfrequentpattern-maximal-package"]], "PAMI.periodicFrequentPattern.maximal.MaxPFGrowth module": [[80, "module-PAMI.periodicFrequentPattern.maximal.MaxPFGrowth"]], "PAMI.periodicFrequentPattern.maximal.abstract module": [[80, "module-PAMI.periodicFrequentPattern.maximal.abstract"]], "PAMI.periodicFrequentPattern.pyspark package": [[81, "pami-periodicfrequentpattern-pyspark-package"]], "PAMI.periodicFrequentPattern.pyspark.abstract module": [[81, "pami-periodicfrequentpattern-pyspark-abstract-module"]], "PAMI.periodicFrequentPattern.pyspark.parallelPFPGrowth module": [[81, "pami-periodicfrequentpattern-pyspark-parallelpfpgrowth-module"]], "PAMI.periodicFrequentPattern.topk package": [[82, "pami-periodicfrequentpattern-topk-package"]], "PAMI.periodicFrequentPattern.topk.TopkPFP package": [[83, "pami-periodicfrequentpattern-topk-topkpfp-package"]], "PAMI.periodicFrequentPattern.topk.TopkPFP.TopkPFP module": [[83, "module-PAMI.periodicFrequentPattern.topk.TopkPFP.TopkPFP"]], "PAMI.periodicFrequentPattern.topk.TopkPFP.abstract module": [[83, "module-PAMI.periodicFrequentPattern.topk.TopkPFP.abstract"]], "PAMI.periodicFrequentPattern.topk.kPFPMiner package": [[84, "pami-periodicfrequentpattern-topk-kpfpminer-package"]], "PAMI.periodicFrequentPattern.topk.kPFPMiner.abstract module": [[84, "module-PAMI.periodicFrequentPattern.topk.kPFPMiner.abstract"]], "PAMI.periodicFrequentPattern.topk.kPFPMiner.kPFPMiner module": [[84, "module-PAMI.periodicFrequentPattern.topk.kPFPMiner.kPFPMiner"]], "**Sample run of the importing code:": [[84, "sample-run-of-the-importing-code"]], "PAMI.recurringPattern package": [[85, "pami-recurringpattern-package"]], "PAMI.recurringPattern.basic package": [[86, "pami-recurringpattern-basic-package"]], "PAMI.recurringPattern.basic.RPGrowth module": [[86, "module-PAMI.recurringPattern.basic.RPGrowth"]], "PAMI.recurringPattern.basic.abstract module": [[86, "module-PAMI.recurringPattern.basic.abstract"]], "PAMI.relativeFrequentPattern package": [[87, "pami-relativefrequentpattern-package"]], "PAMI.relativeFrequentPattern.basic package": [[88, "pami-relativefrequentpattern-basic-package"]], "PAMI.relativeFrequentPattern.basic.RSFPGrowth module": [[88, 
"module-PAMI.relativeFrequentPattern.basic.RSFPGrowth"]], "PAMI.relativeFrequentPattern.basic.abstract module": [[88, "module-PAMI.relativeFrequentPattern.basic.abstract"]], "PAMI.relativeHighUtilityPattern package": [[89, "pami-relativehighutilitypattern-package"]], "PAMI.relativeHighUtilityPattern.basic package": [[90, "pami-relativehighutilitypattern-basic-package"]], "PAMI.relativeHighUtilityPattern.basic.RHUIM module": [[90, "module-PAMI.relativeHighUtilityPattern.basic.RHUIM"]], "PAMI.relativeHighUtilityPattern.basic.abstract module": [[90, "module-PAMI.relativeHighUtilityPattern.basic.abstract"]], "PAMI.sequence package": [[91, "pami-sequence-package"]], "PAMI.sequentialPatternMining package": [[92, "pami-sequentialpatternmining-package"]], "PAMI.sequentialPatternMining.basic package": [[93, "pami-sequentialpatternmining-basic-package"]], "PAMI.sequentialPatternMining.basic.SPADE module": [[93, "module-PAMI.sequentialPatternMining.basic.SPADE"]], "PAMI.sequentialPatternMining.basic.SPAM module": [[93, "module-PAMI.sequentialPatternMining.basic.SPAM"]], "PAMI.sequentialPatternMining.basic.abstract module": [[93, "module-PAMI.sequentialPatternMining.basic.abstract"]], "PAMI.sequentialPatternMining.basic.prefixSpan module": [[93, "module-PAMI.sequentialPatternMining.basic.prefixSpan"]], "PAMI.sequentialPatternMining.closed package": [[94, "pami-sequentialpatternmining-closed-package"]], "PAMI.sequentialPatternMining.closed.abstract module": [[94, "module-PAMI.sequentialPatternMining.closed.abstract"]], "PAMI.sequentialPatternMining.closed.bide module": [[94, "module-PAMI.sequentialPatternMining.closed.bide"]], "PAMI.stablePeriodicFrequentPattern package": [[95, "pami-stableperiodicfrequentpattern-package"]], "PAMI.stablePeriodicFrequentPattern.basic package": [[96, "pami-stableperiodicfrequentpattern-basic-package"]], "PAMI.stablePeriodicFrequentPattern.basic.SPPEclat module": [[96, "module-PAMI.stablePeriodicFrequentPattern.basic.SPPEclat"]], "PAMI.stablePeriodicFrequentPattern.basic.SPPGrowth module": [[96, "pami-stableperiodicfrequentpattern-basic-sppgrowth-module"]], "PAMI.stablePeriodicFrequentPattern.basic.SPPGrowthDump module": [[96, "pami-stableperiodicfrequentpattern-basic-sppgrowthdump-module"]], "PAMI.stablePeriodicFrequentPattern.basic.abstract module": [[96, "module-PAMI.stablePeriodicFrequentPattern.basic.abstract"]], "PAMI.stablePeriodicFrequentPattern.topK package": [[97, "pami-stableperiodicfrequentpattern-topk-package"]], "PAMI.stablePeriodicFrequentPattern.topK.TSPIN module": [[97, "module-PAMI.stablePeriodicFrequentPattern.topK.TSPIN"]], "PAMI.stablePeriodicFrequentPattern.topK.abstract module": [[97, "module-PAMI.stablePeriodicFrequentPattern.topK.abstract"]], "PAMI.subgraphMining package": [[98, "pami-subgraphmining-package"]], "PAMI.subgraphMining.basic package": [[99, "pami-subgraphmining-basic-package"]], "PAMI.subgraphMining.basic.abstract module": [[99, "module-PAMI.subgraphMining.basic.abstract"]], "PAMI.subgraphMining.basic.dfsCode module": [[99, "module-PAMI.subgraphMining.basic.dfsCode"]], "PAMI.subgraphMining.basic.edge module": [[99, "module-PAMI.subgraphMining.basic.edge"]], "PAMI.subgraphMining.basic.extendedEdge module": [[99, "module-PAMI.subgraphMining.basic.extendedEdge"]], "PAMI.subgraphMining.basic.frequentSubgraph module": [[99, "module-PAMI.subgraphMining.basic.frequentSubgraph"]], "PAMI.subgraphMining.basic.graph module": [[99, "module-PAMI.subgraphMining.basic.graph"]], "PAMI.subgraphMining.basic.gspan module": [[99, 
"module-PAMI.subgraphMining.basic.gspan"]], "PAMI.subgraphMining.basic.sparseTriangularMatrix module": [[99, "module-PAMI.subgraphMining.basic.sparseTriangularMatrix"]], "PAMI.subgraphMining.basic.vertex module": [[99, "module-PAMI.subgraphMining.basic.vertex"]], "PAMI.subgraphMining.topK package": [[100, "pami-subgraphmining-topk-package"]], "PAMI.subgraphMining.topK.DFSCode module": [[100, "module-PAMI.subgraphMining.topK.DFSCode"]], "PAMI.subgraphMining.topK.DFSThread module": [[100, "module-PAMI.subgraphMining.topK.DFSThread"]], "PAMI.subgraphMining.topK.abstract module": [[100, "module-PAMI.subgraphMining.topK.abstract"]], "PAMI.subgraphMining.topK.edge module": [[100, "module-PAMI.subgraphMining.topK.edge"]], "PAMI.subgraphMining.topK.extendedEdge module": [[100, "module-PAMI.subgraphMining.topK.extendedEdge"]], "PAMI.subgraphMining.topK.frequentSubgraph module": [[100, "module-PAMI.subgraphMining.topK.frequentSubgraph"]], "PAMI.subgraphMining.topK.graph module": [[100, "module-PAMI.subgraphMining.topK.graph"]], "PAMI.subgraphMining.topK.sparseTriangularMatrix module": [[100, "module-PAMI.subgraphMining.topK.sparseTriangularMatrix"]], "PAMI.subgraphMining.topK.tkg module": [[100, "module-PAMI.subgraphMining.topK.tkg"]], "PAMI.subgraphMining.topK.vertex module": [[100, "module-PAMI.subgraphMining.topK.vertex"]], "PAMI.uncertainFaultTolerantFrequentPattern package": [[101, "pami-uncertainfaulttolerantfrequentpattern-package"]], "PAMI.uncertainFaultTolerantFrequentPattern.VBFTMine module": [[101, "module-PAMI.uncertainFaultTolerantFrequentPattern.VBFTMine"]], "PAMI.uncertainFaultTolerantFrequentPattern.abstract module": [[101, "module-PAMI.uncertainFaultTolerantFrequentPattern.abstract"]], "PAMI.uncertainFrequentPattern package": [[102, "pami-uncertainfrequentpattern-package"]], "PAMI.uncertainFrequentPattern.basic package": [[103, "pami-uncertainfrequentpattern-basic-package"]], "PAMI.uncertainFrequentPattern.basic.CUFPTree module": [[103, "module-PAMI.uncertainFrequentPattern.basic.CUFPTree"]], "PAMI.uncertainFrequentPattern.basic.PUFGrowth module": [[103, "pami-uncertainfrequentpattern-basic-pufgrowth-module"]], "PAMI.uncertainFrequentPattern.basic.TUFP module": [[103, "pami-uncertainfrequentpattern-basic-tufp-module"]], "PAMI.uncertainFrequentPattern.basic.TubeP module": [[103, "pami-uncertainfrequentpattern-basic-tubep-module"]], "PAMI.uncertainFrequentPattern.basic.TubeS module": [[103, "pami-uncertainfrequentpattern-basic-tubes-module"]], "PAMI.uncertainFrequentPattern.basic.UFGrowth module": [[103, "pami-uncertainfrequentpattern-basic-ufgrowth-module"]], "PAMI.uncertainFrequentPattern.basic.UVECLAT module": [[103, "pami-uncertainfrequentpattern-basic-uveclat-module"]], "PAMI.uncertainFrequentPattern.basic.abstract module": [[103, "module-PAMI.uncertainFrequentPattern.basic.abstract"]], "PAMI.uncertainGeoreferencedFrequentPattern package": [[104, "pami-uncertaingeoreferencedfrequentpattern-package"]], "PAMI.uncertainGeoreferencedFrequentPattern.basic package": [[105, "pami-uncertaingeoreferencedfrequentpattern-basic-package"]], "PAMI.uncertainGeoreferencedFrequentPattern.basic.GFPGrowth module": [[105, "module-PAMI.uncertainGeoreferencedFrequentPattern.basic.GFPGrowth"]], "PAMI.uncertainGeoreferencedFrequentPattern.basic.abstract module": [[105, "module-PAMI.uncertainGeoreferencedFrequentPattern.basic.abstract"]], "PAMI.uncertainPeriodicFrequentPattern package": [[106, "pami-uncertainperiodicfrequentpattern-package"]], "PAMI.uncertainPeriodicFrequentPattern.basic package": [[107, 
"pami-uncertainperiodicfrequentpattern-basic-package"]], "PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowth module": [[107, "module-PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowth"]], "PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowthPlus module": [[107, "module-PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowthPlus"]], "PAMI.uncertainPeriodicFrequentPattern.basic.abstract module": [[107, "module-PAMI.uncertainPeriodicFrequentPattern.basic.abstract"]], "PAMI.weightedFrequentNeighbourhoodPattern package": [[108, "pami-weightedfrequentneighbourhoodpattern-package"]], "PAMI.weightedFrequentNeighbourhoodPattern.basic package": [[109, "pami-weightedfrequentneighbourhoodpattern-basic-package"]], "PAMI.weightedFrequentNeighbourhoodPattern.basic.SWFPGrowth module": [[109, "module-PAMI.weightedFrequentNeighbourhoodPattern.basic.SWFPGrowth"]], "PAMI.weightedFrequentNeighbourhoodPattern.basic.abstract module": [[109, "module-PAMI.weightedFrequentNeighbourhoodPattern.basic.abstract"]], "PAMI.weightedFrequentPattern package": [[110, "pami-weightedfrequentpattern-package"]], "PAMI.weightedFrequentPattern.basic package": [[111, "pami-weightedfrequentpattern-basic-package"]], "PAMI.weightedFrequentPattern.basic.WFIM module": [[111, "module-PAMI.weightedFrequentPattern.basic.WFIM"]], "PAMI.weightedFrequentPattern.basic.abstract module": [[111, "module-PAMI.weightedFrequentPattern.basic.abstract"]], "PAMI.weightedFrequentRegularPattern package": [[112, "pami-weightedfrequentregularpattern-package"]], "PAMI.weightedFrequentRegularPattern.basic package": [[113, "pami-weightedfrequentregularpattern-basic-package"]], "PAMI.weightedFrequentRegularPattern.basic.WFRIMiner module": [[113, "module-PAMI.weightedFrequentRegularPattern.basic.WFRIMiner"]], "PAMI.weightedFrequentRegularPattern.basic.abstract module": [[113, "module-PAMI.weightedFrequentRegularPattern.basic.abstract"]], "PAMI.weightedUncertainFrequentPattern package": [[114, "pami-weighteduncertainfrequentpattern-package"]], "PAMI.weightedUncertainFrequentPattern.basic package": [[115, "pami-weighteduncertainfrequentpattern-basic-package"]], "PAMI.weightedUncertainFrequentPattern.basic.WUFIM module": [[115, "module-PAMI.weightedUncertainFrequentPattern.basic.WUFIM"]], "PAMI.weightedUncertainFrequentPattern.basic.abstract module": [[115, "module-PAMI.weightedUncertainFrequentPattern.basic.abstract"]], "Welcome to PAMI\u2019s documentation!": [[116, "welcome-to-pami-s-documentation"]], "Contents:": [[116, null]], "Indices and tables": [[116, "indices-and-tables"]], "PAMI": [[117, "pami"]]}, "indexentries": {"pami": [[0, "module-PAMI"]], "module": [[0, "module-PAMI"], [1, "module-PAMI.AssociationRules"], [2, "module-PAMI.AssociationRules.basic"], [2, "module-PAMI.AssociationRules.basic.ARWithConfidence"], [2, "module-PAMI.AssociationRules.basic.ARWithLeverage"], [2, "module-PAMI.AssociationRules.basic.ARWithLift"], [2, "module-PAMI.AssociationRules.basic.RuleMiner"], [2, "module-PAMI.AssociationRules.basic.abstract"], [3, "module-PAMI.correlatedPattern"], [4, "module-PAMI.correlatedPattern.basic"], [4, "module-PAMI.correlatedPattern.basic.CoMine"], [4, "module-PAMI.correlatedPattern.basic.CoMinePlus"], [4, "module-PAMI.correlatedPattern.basic.abstract"], [5, "module-PAMI.coveragePattern"], [6, "module-PAMI.coveragePattern.basic"], [6, "module-PAMI.coveragePattern.basic.CMine"], [6, "module-PAMI.coveragePattern.basic.CPPG"], [6, "module-PAMI.coveragePattern.basic.abstract"], [7, "module-PAMI.extras"], [7, 
"module-PAMI.extras.generateLatexGraphFile"], [7, "module-PAMI.extras.plotPointOnMap"], [7, "module-PAMI.extras.plotPointOnMap_dump"], [7, "module-PAMI.extras.scatterPlotSpatialPoints"], [7, "module-PAMI.extras.topKPatterns"], [7, "module-PAMI.extras.uncertaindb_convert"], [8, "module-PAMI.extras.DF2DB"], [8, "module-PAMI.extras.DF2DB.DF2DB"], [8, "module-PAMI.extras.DF2DB.DenseFormatDF"], [8, "module-PAMI.extras.DF2DB.SparseFormatDF"], [8, "module-PAMI.extras.DF2DB.createTDB"], [8, "module-PAMI.extras.DF2DB.denseDF2DBPlus"], [8, "module-PAMI.extras.DF2DB.denseDF2DB_dump"], [8, "module-PAMI.extras.DF2DB.sparseDF2DBPlus"], [9, "module-PAMI.extras.calculateMISValues"], [9, "module-PAMI.extras.calculateMISValues.usingBeta"], [9, "module-PAMI.extras.calculateMISValues.usingSD"], [10, "module-PAMI.extras.dbStats"], [10, "module-PAMI.extras.dbStats.FuzzyDatabase"], [10, "module-PAMI.extras.dbStats.MultipleTimeSeriesFuzzyDatabaseStats"], [10, "module-PAMI.extras.dbStats.SequentialDatabase"], [10, "module-PAMI.extras.dbStats.TemporalDatabase"], [10, "module-PAMI.extras.dbStats.TransactionalDatabase"], [10, "module-PAMI.extras.dbStats.UncertainTemporalDatabase"], [10, "module-PAMI.extras.dbStats.UncertainTransactionalDatabase"], [10, "module-PAMI.extras.dbStats.UtilityDatabase"], [11, "module-PAMI.extras.fuzzyTransformation"], [11, "module-PAMI.extras.fuzzyTransformation.abstract"], [11, "module-PAMI.extras.fuzzyTransformation.temporalToFuzzy"], [11, "module-PAMI.extras.fuzzyTransformation.transactionalToFuzzy"], [12, "module-PAMI.extras.generateDatabase"], [12, "module-PAMI.extras.generateDatabase.generateSpatioTemporalDatabase"], [12, "module-PAMI.extras.generateDatabase.generateTemporalDatabase"], [12, "module-PAMI.extras.generateDatabase.generateTransactionalDatabase"], [13, "module-PAMI.extras.graph"], [13, "module-PAMI.extras.graph.DF2Fig"], [13, "module-PAMI.extras.graph.plotLineGraphFromDictionary"], [13, "module-PAMI.extras.graph.plotLineGraphsFromDataFrame"], [13, "module-PAMI.extras.graph.visualizeFuzzyPatterns"], [13, "module-PAMI.extras.graph.visualizePatterns"], [14, "module-PAMI.extras.image2Database"], [15, "module-PAMI.extras.imageProcessing"], [15, "module-PAMI.extras.imageProcessing.imagery2Databases"], [16, "module-PAMI.extras.messaging"], [16, "module-PAMI.extras.messaging.discord"], [16, "module-PAMI.extras.messaging.gmail"], [17, "module-PAMI.extras.neighbours"], [17, "module-PAMI.extras.neighbours.findNeighborsUsingEuclideanDistanceforPointInfo"], [17, "module-PAMI.extras.neighbours.findNeighboursUsingEuclidean"], [17, "module-PAMI.extras.neighbours.findNeighboursUsingGeodesic"], [18, "module-PAMI.extras.sampleDatasets"], [19, "module-PAMI.extras.stats"], [19, "module-PAMI.extras.stats.TransactionalDatabase"], [19, "module-PAMI.extras.stats.graphDatabase"], [19, "module-PAMI.extras.stats.sequentialDatabase"], [19, "module-PAMI.extras.stats.temporalDatabase"], [19, "module-PAMI.extras.stats.utilityDatabase"], [20, "module-PAMI.extras.syntheticDataGenerator"], [20, "module-PAMI.extras.syntheticDataGenerator.TemporalDatabase"], [20, "module-PAMI.extras.syntheticDataGenerator.TransactionalDatabase"], [20, "module-PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialTemporal"], [20, "module-PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialTransactions"], [20, "module-PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialUncertainTransaction"], [20, "module-PAMI.extras.syntheticDataGenerator.createSyntheticTemporal"], [20, 
"module-PAMI.extras.syntheticDataGenerator.createSyntheticTransactions"], [20, "module-PAMI.extras.syntheticDataGenerator.createSyntheticUncertainTemporal"], [20, "module-PAMI.extras.syntheticDataGenerator.createSyntheticUncertainTransactions"], [20, "module-PAMI.extras.syntheticDataGenerator.createSyntheticUtility"], [20, "module-PAMI.extras.syntheticDataGenerator.fuzzyDatabase"], [20, "module-PAMI.extras.syntheticDataGenerator.generateTemporal"], [20, "module-PAMI.extras.syntheticDataGenerator.generateTransactional"], [20, "module-PAMI.extras.syntheticDataGenerator.generateUncertainTemporal"], [20, "module-PAMI.extras.syntheticDataGenerator.generateUncertainTransactional"], [20, "module-PAMI.extras.syntheticDataGenerator.generateUtilityTemporal"], [20, "module-PAMI.extras.syntheticDataGenerator.generateUtilityTransactional"], [20, "module-PAMI.extras.syntheticDataGenerator.georeferencedTemporalDatabase"], [20, "module-PAMI.extras.syntheticDataGenerator.georeferencedTransactionalDatabase"], [20, "module-PAMI.extras.syntheticDataGenerator.syntheticUtilityDatabase"], [20, "module-PAMI.extras.syntheticDataGenerator.temporalDatabaseGen"], [20, "module-PAMI.extras.syntheticDataGenerator.utilityDatabase"], [21, "module-PAMI.extras.visualize"], [21, "module-PAMI.extras.visualize.graphs"], [22, "module-PAMI.faultTolerantFrequentPattern"], [23, "module-PAMI.faultTolerantFrequentPattern.basic"], [23, "module-PAMI.faultTolerantFrequentPattern.basic.FTApriori"], [23, "module-PAMI.faultTolerantFrequentPattern.basic.FTFPGrowth"], [23, "module-PAMI.faultTolerantFrequentPattern.basic.abstract"], [24, "module-PAMI.frequentPattern"], [25, "module-PAMI.frequentPattern.basic"], [25, "module-PAMI.frequentPattern.basic.Apriori"], [25, "module-PAMI.frequentPattern.basic.ECLAT"], [25, "module-PAMI.frequentPattern.basic.ECLATDiffset"], [25, "module-PAMI.frequentPattern.basic.ECLATbitset"], [25, "module-PAMI.frequentPattern.basic.FPGrowth"], [25, "module-PAMI.frequentPattern.basic.abstract"], [26, "module-PAMI.frequentPattern.closed"], [26, "module-PAMI.frequentPattern.closed.CHARM"], [26, "module-PAMI.frequentPattern.closed.abstract"], [27, "module-PAMI.frequentPattern.cuda"], [28, "module-PAMI.frequentPattern.maximal"], [28, "module-PAMI.frequentPattern.maximal.MaxFPGrowth"], [28, "module-PAMI.frequentPattern.maximal.abstract"], [29, "module-PAMI.frequentPattern.pyspark"], [30, "module-PAMI.frequentPattern.topk"], [30, "module-PAMI.frequentPattern.topk.FAE"], [30, "module-PAMI.frequentPattern.topk.abstract"], [31, "module-PAMI.fuzzyCorrelatedPattern"], [32, "module-PAMI.fuzzyCorrelatedPattern.basic"], [32, "module-PAMI.fuzzyCorrelatedPattern.basic.FCPGrowth"], [32, "module-PAMI.fuzzyCorrelatedPattern.basic.abstract"], [33, "module-PAMI.fuzzyFrequentPattern"], [34, "module-PAMI.fuzzyFrequentPattern.basic"], [34, "module-PAMI.fuzzyFrequentPattern.basic.FFIMiner"], [34, "module-PAMI.fuzzyFrequentPattern.basic.FFIMiner_old"], [34, "module-PAMI.fuzzyFrequentPattern.basic.abstract"], [35, "module-PAMI.fuzzyGeoreferencedFrequentPattern"], [36, "module-PAMI.fuzzyGeoreferencedFrequentPattern.basic"], [36, "module-PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner"], [36, "module-PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner_old"], [36, "module-PAMI.fuzzyGeoreferencedFrequentPattern.basic.abstract"], [37, "module-PAMI.fuzzyGeoreferencedPeriodicFrequentPattern"], [38, "module-PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic"], [38, "module-PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner"], 
[38, "module-PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner_old"], [38, "module-PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.abstract"], [39, "module-PAMI.fuzzyPartialPeriodicPatterns"], [40, "module-PAMI.fuzzyPartialPeriodicPatterns.basic"], [40, "module-PAMI.fuzzyPartialPeriodicPatterns.basic.F3PMiner"], [40, "module-PAMI.fuzzyPartialPeriodicPatterns.basic.abstract"], [41, "module-PAMI.fuzzyPeriodicFrequentPattern"], [42, "module-PAMI.fuzzyPeriodicFrequentPattern.basic"], [42, "module-PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner"], [42, "module-PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner_old"], [42, "module-PAMI.fuzzyPeriodicFrequentPattern.basic.abstract"], [43, "module-PAMI.geoReferencedPeriodicFrequentPattern"], [44, "module-PAMI.geoReferencedPeriodicFrequentPattern.basic"], [44, "module-PAMI.geoReferencedPeriodicFrequentPattern.basic.GPFPMiner"], [44, "module-PAMI.geoReferencedPeriodicFrequentPattern.basic.abstract"], [45, "module-PAMI.georeferencedFrequentPattern"], [46, "module-PAMI.georeferencedFrequentPattern.basic"], [46, "module-PAMI.georeferencedFrequentPattern.basic.SpatialECLAT"], [46, "module-PAMI.georeferencedFrequentPattern.basic.abstract"], [47, "module-PAMI.georeferencedFrequentSequencePattern"], [47, "module-PAMI.georeferencedFrequentSequencePattern.abstract"], [48, "module-PAMI.georeferencedPartialPeriodicPattern"], [49, "module-PAMI.georeferencedPartialPeriodicPattern.basic"], [49, "module-PAMI.georeferencedPartialPeriodicPattern.basic.STEclat"], [49, "module-PAMI.georeferencedPartialPeriodicPattern.basic.abstract"], [50, "module-PAMI.highUtilityFrequentPattern"], [51, "module-PAMI.highUtilityFrequentPattern.basic"], [51, "module-PAMI.highUtilityFrequentPattern.basic.HUFIM"], [51, "module-PAMI.highUtilityFrequentPattern.basic.abstract"], [52, "module-PAMI.highUtilityGeoreferencedFrequentPattern"], [53, "module-PAMI.highUtilityGeoreferencedFrequentPattern.basic"], [53, "module-PAMI.highUtilityGeoreferencedFrequentPattern.basic.SHUFIM"], [53, "module-PAMI.highUtilityGeoreferencedFrequentPattern.basic.abstract"], [54, "module-PAMI.highUtilityPattern"], [55, "module-PAMI.highUtilityPattern.basic"], [55, "module-PAMI.highUtilityPattern.basic.EFIM"], [55, "module-PAMI.highUtilityPattern.basic.HMiner"], [55, "module-PAMI.highUtilityPattern.basic.UPGrowth"], [55, "module-PAMI.highUtilityPattern.basic.abstract"], [56, "module-PAMI.highUtilityPattern.parallel"], [56, "module-PAMI.highUtilityPattern.parallel.abstract"], [57, "module-PAMI.highUtilityPatternsInStreams"], [57, "module-PAMI.highUtilityPatternsInStreams.abstract"], [58, "module-PAMI.highUtilitySpatialPattern"], [58, "module-PAMI.highUtilitySpatialPattern.abstract"], [59, "module-PAMI.highUtilitySpatialPattern.basic"], [59, "module-PAMI.highUtilitySpatialPattern.basic.HDSHUIM"], [59, "module-PAMI.highUtilitySpatialPattern.basic.SHUIM"], [59, "module-PAMI.highUtilitySpatialPattern.basic.abstract"], [60, "module-PAMI.highUtilitySpatialPattern.topk"], [60, "module-PAMI.highUtilitySpatialPattern.topk.TKSHUIM"], [60, "module-PAMI.highUtilitySpatialPattern.topk.abstract"], [61, "module-PAMI.localPeriodicPattern"], [62, "module-PAMI.localPeriodicPattern.basic"], [62, "module-PAMI.localPeriodicPattern.basic.LPPGrowth"], [62, "module-PAMI.localPeriodicPattern.basic.LPPMBreadth"], [62, "module-PAMI.localPeriodicPattern.basic.LPPMDepth"], [62, "module-PAMI.localPeriodicPattern.basic.abstract"], [63, "module-PAMI.multipleMinimumSupportBasedFrequentPattern"], [64, 
"module-PAMI.multipleMinimumSupportBasedFrequentPattern.basic"], [64, "module-PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowth"], [64, "module-PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowthPlus"], [64, "module-PAMI.multipleMinimumSupportBasedFrequentPattern.basic.abstract"], [65, "module-PAMI.partialPeriodicFrequentPattern"], [66, "module-PAMI.partialPeriodicFrequentPattern.basic"], [66, "module-PAMI.partialPeriodicFrequentPattern.basic.abstract"], [67, "module-PAMI.partialPeriodicPattern"], [68, "module-PAMI.partialPeriodicPattern.basic"], [68, "module-PAMI.partialPeriodicPattern.basic.Gabstract"], [68, "module-PAMI.partialPeriodicPattern.basic.PPPGrowth"], [68, "module-PAMI.partialPeriodicPattern.basic.PPP_ECLAT"], [68, "module-PAMI.partialPeriodicPattern.basic.abstract"], [69, "module-PAMI.partialPeriodicPattern.closed"], [69, "module-PAMI.partialPeriodicPattern.closed.PPPClose"], [69, "module-PAMI.partialPeriodicPattern.closed.abstract"], [70, "module-PAMI.partialPeriodicPattern.maximal"], [70, "module-PAMI.partialPeriodicPattern.maximal.abstract"], [71, "module-PAMI.partialPeriodicPattern.pyspark"], [71, "module-PAMI.partialPeriodicPattern.pyspark.abstract"], [72, "module-PAMI.partialPeriodicPattern.topk"], [72, "module-PAMI.partialPeriodicPattern.topk.abstract"], [72, "module-PAMI.partialPeriodicPattern.topk.k3PMiner"], [73, "module-PAMI.partialPeriodicPatternInMultipleTimeSeries"], [73, "module-PAMI.partialPeriodicPatternInMultipleTimeSeries.PPGrowth"], [73, "module-PAMI.partialPeriodicPatternInMultipleTimeSeries.abstract"], [74, "module-PAMI.periodicCorrelatedPattern"], [75, "module-PAMI.periodicCorrelatedPattern.basic"], [75, "module-PAMI.periodicCorrelatedPattern.basic.EPCPGrowth"], [75, "module-PAMI.periodicCorrelatedPattern.basic.abstract"], [76, "module-PAMI.periodicFrequentPattern"], [77, "module-PAMI.periodicFrequentPattern.basic"], [77, "module-PAMI.periodicFrequentPattern.basic.PFECLAT"], [77, "module-PAMI.periodicFrequentPattern.basic.PFPGrowth"], [77, "module-PAMI.periodicFrequentPattern.basic.PFPGrowthPlus"], [77, "module-PAMI.periodicFrequentPattern.basic.PFPMC"], [77, "module-PAMI.periodicFrequentPattern.basic.PSGrowth"], [77, "module-PAMI.periodicFrequentPattern.basic.abstract"], [78, "module-PAMI.periodicFrequentPattern.closed"], [78, "module-PAMI.periodicFrequentPattern.closed.CPFPMiner"], [78, "module-PAMI.periodicFrequentPattern.closed.abstract"], [79, "module-PAMI.periodicFrequentPattern.cuda"], [80, "module-PAMI.periodicFrequentPattern.maximal"], [80, "module-PAMI.periodicFrequentPattern.maximal.MaxPFGrowth"], [80, "module-PAMI.periodicFrequentPattern.maximal.abstract"], [81, "module-PAMI.periodicFrequentPattern.pyspark"], [82, "module-PAMI.periodicFrequentPattern.topk"], [83, "module-PAMI.periodicFrequentPattern.topk.TopkPFP"], [83, "module-PAMI.periodicFrequentPattern.topk.TopkPFP.TopkPFP"], [83, "module-PAMI.periodicFrequentPattern.topk.TopkPFP.abstract"], [84, "module-PAMI.periodicFrequentPattern.topk.kPFPMiner"], [84, "module-PAMI.periodicFrequentPattern.topk.kPFPMiner.abstract"], [84, "module-PAMI.periodicFrequentPattern.topk.kPFPMiner.kPFPMiner"], [85, "module-PAMI.recurringPattern"], [86, "module-PAMI.recurringPattern.basic"], [86, "module-PAMI.recurringPattern.basic.RPGrowth"], [86, "module-PAMI.recurringPattern.basic.abstract"], [87, "module-PAMI.relativeFrequentPattern"], [88, "module-PAMI.relativeFrequentPattern.basic"], [88, "module-PAMI.relativeFrequentPattern.basic.RSFPGrowth"], [88, 
"module-PAMI.relativeFrequentPattern.basic.abstract"], [89, "module-PAMI.relativeHighUtilityPattern"], [90, "module-PAMI.relativeHighUtilityPattern.basic"], [90, "module-PAMI.relativeHighUtilityPattern.basic.RHUIM"], [90, "module-PAMI.relativeHighUtilityPattern.basic.abstract"], [91, "module-PAMI.sequence"], [92, "module-PAMI.sequentialPatternMining"], [93, "module-PAMI.sequentialPatternMining.basic"], [93, "module-PAMI.sequentialPatternMining.basic.SPADE"], [93, "module-PAMI.sequentialPatternMining.basic.SPAM"], [93, "module-PAMI.sequentialPatternMining.basic.abstract"], [93, "module-PAMI.sequentialPatternMining.basic.prefixSpan"], [94, "module-PAMI.sequentialPatternMining.closed"], [94, "module-PAMI.sequentialPatternMining.closed.abstract"], [94, "module-PAMI.sequentialPatternMining.closed.bide"], [95, "module-PAMI.stablePeriodicFrequentPattern"], [96, "module-PAMI.stablePeriodicFrequentPattern.basic"], [96, "module-PAMI.stablePeriodicFrequentPattern.basic.SPPEclat"], [96, "module-PAMI.stablePeriodicFrequentPattern.basic.abstract"], [97, "module-PAMI.stablePeriodicFrequentPattern.topK"], [97, "module-PAMI.stablePeriodicFrequentPattern.topK.TSPIN"], [97, "module-PAMI.stablePeriodicFrequentPattern.topK.abstract"], [98, "module-PAMI.subgraphMining"], [99, "module-PAMI.subgraphMining.basic"], [99, "module-PAMI.subgraphMining.basic.abstract"], [99, "module-PAMI.subgraphMining.basic.dfsCode"], [99, "module-PAMI.subgraphMining.basic.edge"], [99, "module-PAMI.subgraphMining.basic.extendedEdge"], [99, "module-PAMI.subgraphMining.basic.frequentSubgraph"], [99, "module-PAMI.subgraphMining.basic.graph"], [99, "module-PAMI.subgraphMining.basic.gspan"], [99, "module-PAMI.subgraphMining.basic.sparseTriangularMatrix"], [99, "module-PAMI.subgraphMining.basic.vertex"], [100, "module-PAMI.subgraphMining.topK"], [100, "module-PAMI.subgraphMining.topK.DFSCode"], [100, "module-PAMI.subgraphMining.topK.DFSThread"], [100, "module-PAMI.subgraphMining.topK.abstract"], [100, "module-PAMI.subgraphMining.topK.edge"], [100, "module-PAMI.subgraphMining.topK.extendedEdge"], [100, "module-PAMI.subgraphMining.topK.frequentSubgraph"], [100, "module-PAMI.subgraphMining.topK.graph"], [100, "module-PAMI.subgraphMining.topK.sparseTriangularMatrix"], [100, "module-PAMI.subgraphMining.topK.tkg"], [100, "module-PAMI.subgraphMining.topK.vertex"], [101, "module-PAMI.uncertainFaultTolerantFrequentPattern"], [101, "module-PAMI.uncertainFaultTolerantFrequentPattern.VBFTMine"], [101, "module-PAMI.uncertainFaultTolerantFrequentPattern.abstract"], [102, "module-PAMI.uncertainFrequentPattern"], [103, "module-PAMI.uncertainFrequentPattern.basic"], [103, "module-PAMI.uncertainFrequentPattern.basic.CUFPTree"], [103, "module-PAMI.uncertainFrequentPattern.basic.abstract"], [104, "module-PAMI.uncertainGeoreferencedFrequentPattern"], [105, "module-PAMI.uncertainGeoreferencedFrequentPattern.basic"], [105, "module-PAMI.uncertainGeoreferencedFrequentPattern.basic.GFPGrowth"], [105, "module-PAMI.uncertainGeoreferencedFrequentPattern.basic.abstract"], [106, "module-PAMI.uncertainPeriodicFrequentPattern"], [107, "module-PAMI.uncertainPeriodicFrequentPattern.basic"], [107, "module-PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowth"], [107, "module-PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowthPlus"], [107, "module-PAMI.uncertainPeriodicFrequentPattern.basic.abstract"], [108, "module-PAMI.weightedFrequentNeighbourhoodPattern"], [109, "module-PAMI.weightedFrequentNeighbourhoodPattern.basic"], [109, 
"module-PAMI.weightedFrequentNeighbourhoodPattern.basic.SWFPGrowth"], [109, "module-PAMI.weightedFrequentNeighbourhoodPattern.basic.abstract"], [110, "module-PAMI.weightedFrequentPattern"], [111, "module-PAMI.weightedFrequentPattern.basic"], [111, "module-PAMI.weightedFrequentPattern.basic.WFIM"], [111, "module-PAMI.weightedFrequentPattern.basic.abstract"], [112, "module-PAMI.weightedFrequentRegularPattern"], [113, "module-PAMI.weightedFrequentRegularPattern.basic"], [113, "module-PAMI.weightedFrequentRegularPattern.basic.WFRIMiner"], [113, "module-PAMI.weightedFrequentRegularPattern.basic.abstract"], [114, "module-PAMI.weightedUncertainFrequentPattern"], [115, "module-PAMI.weightedUncertainFrequentPattern.basic"], [115, "module-PAMI.weightedUncertainFrequentPattern.basic.WUFIM"], [115, "module-PAMI.weightedUncertainFrequentPattern.basic.abstract"]], "pami.associationrules": [[1, "module-PAMI.AssociationRules"]], "arwithconfidence (class in pami.associationrules.basic.arwithconfidence)": [[2, "PAMI.AssociationRules.basic.ARWithConfidence.ARWithConfidence"]], "arwithleverage (class in pami.associationrules.basic.arwithleverage)": [[2, "PAMI.AssociationRules.basic.ARWithLeverage.ARWithLeverage"]], "arwithlift (class in pami.associationrules.basic.arwithlift)": [[2, "PAMI.AssociationRules.basic.ARWithLift.ARWithLift"]], "confidence (class in pami.associationrules.basic.ruleminer)": [[2, "PAMI.AssociationRules.basic.RuleMiner.Confidence"]], "leverage (class in pami.associationrules.basic.ruleminer)": [[2, "PAMI.AssociationRules.basic.RuleMiner.Leverage"]], "lift (class in pami.associationrules.basic.arwithlift)": [[2, "PAMI.AssociationRules.basic.ARWithLift.Lift"]], "lift (class in pami.associationrules.basic.ruleminer)": [[2, "PAMI.AssociationRules.basic.RuleMiner.Lift"]], "pami.associationrules.basic": [[2, "module-PAMI.AssociationRules.basic"]], "pami.associationrules.basic.arwithconfidence": [[2, "module-PAMI.AssociationRules.basic.ARWithConfidence"]], "pami.associationrules.basic.arwithleverage": [[2, "module-PAMI.AssociationRules.basic.ARWithLeverage"]], "pami.associationrules.basic.arwithlift": [[2, "module-PAMI.AssociationRules.basic.ARWithLift"]], "pami.associationrules.basic.ruleminer": [[2, "module-PAMI.AssociationRules.basic.RuleMiner"]], "pami.associationrules.basic.abstract": [[2, "module-PAMI.AssociationRules.basic.abstract"]], "ruleminer (class in pami.associationrules.basic.ruleminer)": [[2, "PAMI.AssociationRules.basic.RuleMiner.RuleMiner"]], "getmemoryrss() (pami.associationrules.basic.arwithconfidence.arwithconfidence method)": [[2, "PAMI.AssociationRules.basic.ARWithConfidence.ARWithConfidence.getMemoryRSS"]], "getmemoryrss() (pami.associationrules.basic.arwithleverage.arwithleverage method)": [[2, "PAMI.AssociationRules.basic.ARWithLeverage.ARWithLeverage.getMemoryRSS"]], "getmemoryrss() (pami.associationrules.basic.arwithlift.arwithlift method)": [[2, "PAMI.AssociationRules.basic.ARWithLift.ARWithLift.getMemoryRSS"]], "getmemoryrss() (pami.associationrules.basic.ruleminer.ruleminer method)": [[2, "PAMI.AssociationRules.basic.RuleMiner.RuleMiner.getMemoryRSS"]], "getmemoryuss() (pami.associationrules.basic.arwithconfidence.arwithconfidence method)": [[2, "PAMI.AssociationRules.basic.ARWithConfidence.ARWithConfidence.getMemoryUSS"]], "getmemoryuss() (pami.associationrules.basic.arwithleverage.arwithleverage method)": [[2, "PAMI.AssociationRules.basic.ARWithLeverage.ARWithLeverage.getMemoryUSS"]], "getmemoryuss() (pami.associationrules.basic.arwithlift.arwithlift method)": 
[[2, "PAMI.AssociationRules.basic.ARWithLift.ARWithLift.getMemoryUSS"]], "getmemoryuss() (pami.associationrules.basic.ruleminer.ruleminer method)": [[2, "PAMI.AssociationRules.basic.RuleMiner.RuleMiner.getMemoryUSS"]], "getpatterns() (pami.associationrules.basic.arwithconfidence.arwithconfidence method)": [[2, "PAMI.AssociationRules.basic.ARWithConfidence.ARWithConfidence.getPatterns"]], "getpatterns() (pami.associationrules.basic.arwithleverage.arwithleverage method)": [[2, "PAMI.AssociationRules.basic.ARWithLeverage.ARWithLeverage.getPatterns"]], "getpatterns() (pami.associationrules.basic.arwithlift.arwithlift method)": [[2, "PAMI.AssociationRules.basic.ARWithLift.ARWithLift.getPatterns"]], "getpatterns() (pami.associationrules.basic.ruleminer.ruleminer method)": [[2, "PAMI.AssociationRules.basic.RuleMiner.RuleMiner.getPatterns"]], "getpatternsasdataframe() (pami.associationrules.basic.arwithconfidence.arwithconfidence method)": [[2, "PAMI.AssociationRules.basic.ARWithConfidence.ARWithConfidence.getPatternsAsDataFrame"]], "getpatternsasdataframe() (pami.associationrules.basic.arwithleverage.arwithleverage method)": [[2, "PAMI.AssociationRules.basic.ARWithLeverage.ARWithLeverage.getPatternsAsDataFrame"]], "getpatternsasdataframe() (pami.associationrules.basic.arwithlift.arwithlift method)": [[2, "PAMI.AssociationRules.basic.ARWithLift.ARWithLift.getPatternsAsDataFrame"]], "getpatternsasdataframe() (pami.associationrules.basic.ruleminer.ruleminer method)": [[2, "PAMI.AssociationRules.basic.RuleMiner.RuleMiner.getPatternsAsDataFrame"]], "getruntime() (pami.associationrules.basic.arwithconfidence.arwithconfidence method)": [[2, "PAMI.AssociationRules.basic.ARWithConfidence.ARWithConfidence.getRuntime"]], "getruntime() (pami.associationrules.basic.arwithleverage.arwithleverage method)": [[2, "PAMI.AssociationRules.basic.ARWithLeverage.ARWithLeverage.getRuntime"]], "getruntime() (pami.associationrules.basic.arwithlift.arwithlift method)": [[2, "PAMI.AssociationRules.basic.ARWithLift.ARWithLift.getRuntime"]], "getruntime() (pami.associationrules.basic.ruleminer.ruleminer method)": [[2, "PAMI.AssociationRules.basic.RuleMiner.RuleMiner.getRuntime"]], "mine() (pami.associationrules.basic.arwithconfidence.arwithconfidence method)": [[2, "PAMI.AssociationRules.basic.ARWithConfidence.ARWithConfidence.mine"]], "mine() (pami.associationrules.basic.arwithleverage.arwithleverage method)": [[2, "PAMI.AssociationRules.basic.ARWithLeverage.ARWithLeverage.mine"]], "mine() (pami.associationrules.basic.arwithlift.arwithlift method)": [[2, "PAMI.AssociationRules.basic.ARWithLift.ARWithLift.mine"]], "mine() (pami.associationrules.basic.ruleminer.ruleminer method)": [[2, "PAMI.AssociationRules.basic.RuleMiner.RuleMiner.mine"]], "printresults() (pami.associationrules.basic.arwithconfidence.arwithconfidence method)": [[2, "PAMI.AssociationRules.basic.ARWithConfidence.ARWithConfidence.printResults"]], "printresults() (pami.associationrules.basic.arwithleverage.arwithleverage method)": [[2, "PAMI.AssociationRules.basic.ARWithLeverage.ARWithLeverage.printResults"]], "printresults() (pami.associationrules.basic.arwithlift.arwithlift method)": [[2, "PAMI.AssociationRules.basic.ARWithLift.ARWithLift.printResults"]], "printresults() (pami.associationrules.basic.ruleminer.ruleminer method)": [[2, "PAMI.AssociationRules.basic.RuleMiner.RuleMiner.printResults"]], "run() (pami.associationrules.basic.arwithlift.lift method)": [[2, "PAMI.AssociationRules.basic.ARWithLift.Lift.run"]], "run() 
(pami.associationrules.basic.ruleminer.confidence method)": [[2, "PAMI.AssociationRules.basic.RuleMiner.Confidence.run"]], "run() (pami.associationrules.basic.ruleminer.leverage method)": [[2, "PAMI.AssociationRules.basic.RuleMiner.Leverage.run"]], "run() (pami.associationrules.basic.ruleminer.lift method)": [[2, "PAMI.AssociationRules.basic.RuleMiner.Lift.run"]], "save() (pami.associationrules.basic.arwithconfidence.arwithconfidence method)": [[2, "PAMI.AssociationRules.basic.ARWithConfidence.ARWithConfidence.save"]], "save() (pami.associationrules.basic.arwithleverage.arwithleverage method)": [[2, "PAMI.AssociationRules.basic.ARWithLeverage.ARWithLeverage.save"]], "save() (pami.associationrules.basic.arwithlift.arwithlift method)": [[2, "PAMI.AssociationRules.basic.ARWithLift.ARWithLift.save"]], "save() (pami.associationrules.basic.ruleminer.ruleminer method)": [[2, "PAMI.AssociationRules.basic.RuleMiner.RuleMiner.save"]], "startmine() (pami.associationrules.basic.arwithconfidence.arwithconfidence method)": [[2, "PAMI.AssociationRules.basic.ARWithConfidence.ARWithConfidence.startMine"]], "startmine() (pami.associationrules.basic.arwithleverage.arwithleverage method)": [[2, "PAMI.AssociationRules.basic.ARWithLeverage.ARWithLeverage.startMine"]], "startmine() (pami.associationrules.basic.arwithlift.arwithlift method)": [[2, "PAMI.AssociationRules.basic.ARWithLift.ARWithLift.startMine"]], "startmine() (pami.associationrules.basic.ruleminer.ruleminer method)": [[2, "PAMI.AssociationRules.basic.RuleMiner.RuleMiner.startMine"]], "pami.correlatedpattern": [[3, "module-PAMI.correlatedPattern"]], "comine (class in pami.correlatedpattern.basic.comine)": [[4, "PAMI.correlatedPattern.basic.CoMine.CoMine"]], "comineplus (class in pami.correlatedpattern.basic.comineplus)": [[4, "PAMI.correlatedPattern.basic.CoMinePlus.CoMinePlus"]], "pami.correlatedpattern.basic": [[4, "module-PAMI.correlatedPattern.basic"]], "pami.correlatedpattern.basic.comine": [[4, "module-PAMI.correlatedPattern.basic.CoMine"]], "pami.correlatedpattern.basic.comineplus": [[4, "module-PAMI.correlatedPattern.basic.CoMinePlus"]], "pami.correlatedpattern.basic.abstract": [[4, "module-PAMI.correlatedPattern.basic.abstract"]], "getmemoryrss() (pami.correlatedpattern.basic.comine.comine method)": [[4, "PAMI.correlatedPattern.basic.CoMine.CoMine.getMemoryRSS"]], "getmemoryrss() (pami.correlatedpattern.basic.comineplus.comineplus method)": [[4, "PAMI.correlatedPattern.basic.CoMinePlus.CoMinePlus.getMemoryRSS"]], "getmemoryuss() (pami.correlatedpattern.basic.comine.comine method)": [[4, "PAMI.correlatedPattern.basic.CoMine.CoMine.getMemoryUSS"]], "getmemoryuss() (pami.correlatedpattern.basic.comineplus.comineplus method)": [[4, "PAMI.correlatedPattern.basic.CoMinePlus.CoMinePlus.getMemoryUSS"]], "getpatterns() (pami.correlatedpattern.basic.comine.comine method)": [[4, "PAMI.correlatedPattern.basic.CoMine.CoMine.getPatterns"]], "getpatterns() (pami.correlatedpattern.basic.comineplus.comineplus method)": [[4, "PAMI.correlatedPattern.basic.CoMinePlus.CoMinePlus.getPatterns"]], "getpatternsasdataframe() (pami.correlatedpattern.basic.comine.comine method)": [[4, "PAMI.correlatedPattern.basic.CoMine.CoMine.getPatternsAsDataFrame"]], "getpatternsasdataframe() (pami.correlatedpattern.basic.comineplus.comineplus method)": [[4, "PAMI.correlatedPattern.basic.CoMinePlus.CoMinePlus.getPatternsAsDataFrame"]], "getruntime() (pami.correlatedpattern.basic.comine.comine method)": [[4, "PAMI.correlatedPattern.basic.CoMine.CoMine.getRuntime"]], "getruntime() 
(pami.correlatedpattern.basic.comineplus.comineplus method)": [[4, "PAMI.correlatedPattern.basic.CoMinePlus.CoMinePlus.getRuntime"]], "mine() (pami.correlatedpattern.basic.comine.comine method)": [[4, "PAMI.correlatedPattern.basic.CoMine.CoMine.mine"]], "mine() (pami.correlatedpattern.basic.comineplus.comineplus method)": [[4, "PAMI.correlatedPattern.basic.CoMinePlus.CoMinePlus.mine"]], "printresults() (pami.correlatedpattern.basic.comine.comine method)": [[4, "PAMI.correlatedPattern.basic.CoMine.CoMine.printResults"]], "printresults() (pami.correlatedpattern.basic.comineplus.comineplus method)": [[4, "PAMI.correlatedPattern.basic.CoMinePlus.CoMinePlus.printResults"]], "save() (pami.correlatedpattern.basic.comine.comine method)": [[4, "PAMI.correlatedPattern.basic.CoMine.CoMine.save"]], "save() (pami.correlatedpattern.basic.comineplus.comineplus method)": [[4, "PAMI.correlatedPattern.basic.CoMinePlus.CoMinePlus.save"]], "startmine() (pami.correlatedpattern.basic.comine.comine method)": [[4, "PAMI.correlatedPattern.basic.CoMine.CoMine.startMine"]], "startmine() (pami.correlatedpattern.basic.comineplus.comineplus method)": [[4, "PAMI.correlatedPattern.basic.CoMinePlus.CoMinePlus.startMine"]], "pami.coveragepattern": [[5, "module-PAMI.coveragePattern"]], "cmine (class in pami.coveragepattern.basic.cmine)": [[6, "PAMI.coveragePattern.basic.CMine.CMine"]], "cppg (class in pami.coveragepattern.basic.cppg)": [[6, "PAMI.coveragePattern.basic.CPPG.CPPG"]], "pami.coveragepattern.basic": [[6, "module-PAMI.coveragePattern.basic"]], "pami.coveragepattern.basic.cmine": [[6, "module-PAMI.coveragePattern.basic.CMine"]], "pami.coveragepattern.basic.cppg": [[6, "module-PAMI.coveragePattern.basic.CPPG"]], "pami.coveragepattern.basic.abstract": [[6, "module-PAMI.coveragePattern.basic.abstract"]], "creatingcoverageitems() (pami.coveragepattern.basic.cmine.cmine method)": [[6, "PAMI.coveragePattern.basic.CMine.CMine.creatingCoverageItems"]], "genpatterns() (pami.coveragepattern.basic.cmine.cmine method)": [[6, "PAMI.coveragePattern.basic.CMine.CMine.genPatterns"]], "generateallpatterns() (pami.coveragepattern.basic.cmine.cmine method)": [[6, "PAMI.coveragePattern.basic.CMine.CMine.generateAllPatterns"]], "getmemoryrss() (pami.coveragepattern.basic.cmine.cmine method)": [[6, "PAMI.coveragePattern.basic.CMine.CMine.getMemoryRSS"]], "getmemoryrss() (pami.coveragepattern.basic.cppg.cppg method)": [[6, "PAMI.coveragePattern.basic.CPPG.CPPG.getMemoryRSS"]], "getmemoryuss() (pami.coveragepattern.basic.cmine.cmine method)": [[6, "PAMI.coveragePattern.basic.CMine.CMine.getMemoryUSS"]], "getmemoryuss() (pami.coveragepattern.basic.cppg.cppg method)": [[6, "PAMI.coveragePattern.basic.CPPG.CPPG.getMemoryUSS"]], "getpatterns() (pami.coveragepattern.basic.cmine.cmine method)": [[6, "PAMI.coveragePattern.basic.CMine.CMine.getPatterns"]], "getpatterns() (pami.coveragepattern.basic.cppg.cppg method)": [[6, "PAMI.coveragePattern.basic.CPPG.CPPG.getPatterns"]], "getpatternsasdataframe() (pami.coveragepattern.basic.cmine.cmine method)": [[6, "PAMI.coveragePattern.basic.CMine.CMine.getPatternsAsDataFrame"]], "getpatternsasdataframe() (pami.coveragepattern.basic.cppg.cppg method)": [[6, "PAMI.coveragePattern.basic.CPPG.CPPG.getPatternsAsDataFrame"]], "getruntime() (pami.coveragepattern.basic.cmine.cmine method)": [[6, "PAMI.coveragePattern.basic.CMine.CMine.getRuntime"]], "getruntime() (pami.coveragepattern.basic.cppg.cppg method)": [[6, "PAMI.coveragePattern.basic.CPPG.CPPG.getRuntime"]], "mine() 
(pami.coveragepattern.basic.cmine.cmine method)": [[6, "PAMI.coveragePattern.basic.CMine.CMine.mine"]], "mine() (pami.coveragepattern.basic.cppg.cppg method)": [[6, "PAMI.coveragePattern.basic.CPPG.CPPG.mine"]], "printresults() (pami.coveragepattern.basic.cmine.cmine method)": [[6, "PAMI.coveragePattern.basic.CMine.CMine.printResults"]], "printresults() (pami.coveragepattern.basic.cppg.cppg method)": [[6, "PAMI.coveragePattern.basic.CPPG.CPPG.printResults"]], "save() (pami.coveragepattern.basic.cmine.cmine method)": [[6, "PAMI.coveragePattern.basic.CMine.CMine.save"]], "save() (pami.coveragepattern.basic.cppg.cppg method)": [[6, "PAMI.coveragePattern.basic.CPPG.CPPG.save"]], "startmine() (pami.coveragepattern.basic.cmine.cmine method)": [[6, "PAMI.coveragePattern.basic.CMine.CMine.startMine"]], "startmine() (pami.coveragepattern.basic.cppg.cppg method)": [[6, "PAMI.coveragePattern.basic.CPPG.CPPG.startMine"]], "tidtobitset() (pami.coveragepattern.basic.cmine.cmine method)": [[6, "PAMI.coveragePattern.basic.CMine.CMine.tidToBitset"]], "pami.extras": [[7, "module-PAMI.extras"]], "pami.extras.generatelatexgraphfile": [[7, "module-PAMI.extras.generateLatexGraphFile"]], "pami.extras.plotpointonmap": [[7, "module-PAMI.extras.plotPointOnMap"]], "pami.extras.plotpointonmap_dump": [[7, "module-PAMI.extras.plotPointOnMap_dump"]], "pami.extras.scatterplotspatialpoints": [[7, "module-PAMI.extras.scatterPlotSpatialPoints"]], "pami.extras.topkpatterns": [[7, "module-PAMI.extras.topKPatterns"]], "pami.extras.uncertaindb_convert": [[7, "module-PAMI.extras.uncertaindb_convert"]], "convertpoint() (pami.extras.plotpointonmap.plotpointonmap method)": [[7, "PAMI.extras.plotPointOnMap.plotPointOnMap.convertPOINT"]], "convertpoint() (pami.extras.plotpointonmap_dump.plotpointonmap method)": [[7, "PAMI.extras.plotPointOnMap_dump.plotPointOnMap.convertPOINT"]], "findtopkpatterns() (pami.extras.plotpointonmap.plotpointonmap method)": [[7, "PAMI.extras.plotPointOnMap.plotPointOnMap.findTopKPatterns"]], "findtopkpatterns() (pami.extras.plotpointonmap_dump.plotpointonmap method)": [[7, "PAMI.extras.plotPointOnMap_dump.plotPointOnMap.findTopKPatterns"]], "generatelatexcode() (in module pami.extras.generatelatexgraphfile)": [[7, "PAMI.extras.generateLatexGraphFile.generateLatexCode"]], "generatelatexgraphfile (class in pami.extras.generatelatexgraphfile)": [[7, "PAMI.extras.generateLatexGraphFile.generateLatexGraphFile"]], "getbinarytransaction() (pami.extras.uncertaindb_convert.predictedclass2transaction method)": [[7, "PAMI.extras.uncertaindb_convert.predictedClass2Transaction.getBinaryTransaction"]], "gettopkpatterns() (pami.extras.topkpatterns.topkpatterns method)": [[7, "PAMI.extras.topKPatterns.topKPatterns.getTopKPatterns"]], "plotpointinmap() (pami.extras.plotpointonmap.plotpointonmap method)": [[7, "PAMI.extras.plotPointOnMap.plotPointOnMap.plotPointInMap"]], "plotpointinmap() (pami.extras.plotpointonmap_dump.plotpointonmap method)": [[7, "PAMI.extras.plotPointOnMap_dump.plotPointOnMap.plotPointInMap"]], "plotpointonmap (class in pami.extras.plotpointonmap)": [[7, "PAMI.extras.plotPointOnMap.plotPointOnMap"]], "plotpointonmap (class in pami.extras.plotpointonmap_dump)": [[7, "PAMI.extras.plotPointOnMap_dump.plotPointOnMap"]], "predictedclass2transaction (class in pami.extras.uncertaindb_convert)": [[7, "PAMI.extras.uncertaindb_convert.predictedClass2Transaction"]], "save() (pami.extras.topkpatterns.topkpatterns method)": [[7, "PAMI.extras.topKPatterns.topKPatterns.save"]], "scatterplotspatialpoints (class in 
pami.extras.scatterplotspatialpoints)": [[7, "PAMI.extras.scatterPlotSpatialPoints.scatterPlotSpatialPoints"]], "scatterplotspatialpoints() (pami.extras.scatterplotspatialpoints.scatterplotspatialpoints method)": [[7, "PAMI.extras.scatterPlotSpatialPoints.scatterPlotSpatialPoints.scatterPlotSpatialPoints"]], "topkpatterns (class in pami.extras.topkpatterns)": [[7, "PAMI.extras.topKPatterns.topKPatterns"]], "df2db (class in pami.extras.df2db.df2db)": [[8, "PAMI.extras.DF2DB.DF2DB.DF2DB"]], "denseformatdf (class in pami.extras.df2db.denseformatdf)": [[8, "PAMI.extras.DF2DB.DenseFormatDF.DenseFormatDF"]], "denseformatdf (class in pami.extras.df2db.densedf2db_dump)": [[8, "PAMI.extras.DF2DB.denseDF2DB_dump.DenseFormatDF"]], "denseformatdfplus (class in pami.extras.df2db.densedf2dbplus)": [[8, "PAMI.extras.DF2DB.denseDF2DBPlus.DenseFormatDFPlus"]], "pami.extras.df2db": [[8, "module-PAMI.extras.DF2DB"]], "pami.extras.df2db.df2db": [[8, "module-PAMI.extras.DF2DB.DF2DB"]], "pami.extras.df2db.denseformatdf": [[8, "module-PAMI.extras.DF2DB.DenseFormatDF"]], "pami.extras.df2db.sparseformatdf": [[8, "module-PAMI.extras.DF2DB.SparseFormatDF"]], "pami.extras.df2db.createtdb": [[8, "module-PAMI.extras.DF2DB.createTDB"]], "pami.extras.df2db.densedf2dbplus": [[8, "module-PAMI.extras.DF2DB.denseDF2DBPlus"]], "pami.extras.df2db.densedf2db_dump": [[8, "module-PAMI.extras.DF2DB.denseDF2DB_dump"]], "pami.extras.df2db.sparsedf2dbplus": [[8, "module-PAMI.extras.DF2DB.sparseDF2DBPlus"]], "sparseformatdf (class in pami.extras.df2db.sparseformatdf)": [[8, "PAMI.extras.DF2DB.SparseFormatDF.SparseFormatDF"]], "sparseformatdfplus (class in pami.extras.df2db.sparsedf2dbplus)": [[8, "PAMI.extras.DF2DB.sparseDF2DBPlus.SparseFormatDFPlus"]], "convert2multipletimeseries() (pami.extras.df2db.denseformatdf.denseformatdf method)": [[8, "PAMI.extras.DF2DB.DenseFormatDF.DenseFormatDF.convert2MultipleTimeSeries"]], "convert2temporaldatabase() (pami.extras.df2db.denseformatdf.denseformatdf method)": [[8, "PAMI.extras.DF2DB.DenseFormatDF.DenseFormatDF.convert2TemporalDatabase"]], "convert2transactionaldatabase() (pami.extras.df2db.denseformatdf.denseformatdf method)": [[8, "PAMI.extras.DF2DB.DenseFormatDF.DenseFormatDF.convert2TransactionalDatabase"]], "convert2uncertaintransactional() (pami.extras.df2db.denseformatdf.denseformatdf method)": [[8, "PAMI.extras.DF2DB.DenseFormatDF.DenseFormatDF.convert2UncertainTransactional"]], "convert2utilitydatabase() (pami.extras.df2db.denseformatdf.denseformatdf method)": [[8, "PAMI.extras.DF2DB.DenseFormatDF.DenseFormatDF.convert2UtilityDatabase"]], "createtdb (class in pami.extras.df2db.createtdb)": [[8, "PAMI.extras.DF2DB.createTDB.createTDB"]], "createtdb() (pami.extras.df2db.createtdb.createtdb method)": [[8, "PAMI.extras.DF2DB.createTDB.createTDB.createTDB"]], "createtemporal() (pami.extras.df2db.sparseformatdf.sparseformatdf method)": [[8, "PAMI.extras.DF2DB.SparseFormatDF.SparseFormatDF.createTemporal"]], "createtemporal() (pami.extras.df2db.densedf2dbplus.denseformatdfplus method)": [[8, "PAMI.extras.DF2DB.denseDF2DBPlus.DenseFormatDFPlus.createTemporal"]], "createtemporal() (pami.extras.df2db.densedf2db_dump.denseformatdf method)": [[8, "PAMI.extras.DF2DB.denseDF2DB_dump.DenseFormatDF.createTemporal"]], "createtemporal() (pami.extras.df2db.sparsedf2dbplus.sparseformatdfplus method)": [[8, "PAMI.extras.DF2DB.sparseDF2DBPlus.SparseFormatDFPlus.createTemporal"]], "createtransactional() (pami.extras.df2db.sparseformatdf.sparseformatdf method)": [[8, 
"PAMI.extras.DF2DB.SparseFormatDF.SparseFormatDF.createTransactional"]], "createtransactional() (pami.extras.df2db.densedf2dbplus.denseformatdfplus method)": [[8, "PAMI.extras.DF2DB.denseDF2DBPlus.DenseFormatDFPlus.createTransactional"]], "createtransactional() (pami.extras.df2db.densedf2db_dump.denseformatdf method)": [[8, "PAMI.extras.DF2DB.denseDF2DB_dump.DenseFormatDF.createTransactional"]], "createtransactional() (pami.extras.df2db.sparsedf2dbplus.sparseformatdfplus method)": [[8, "PAMI.extras.DF2DB.sparseDF2DBPlus.SparseFormatDFPlus.createTransactional"]], "createutility() (pami.extras.df2db.sparseformatdf.sparseformatdf method)": [[8, "PAMI.extras.DF2DB.SparseFormatDF.SparseFormatDF.createUtility"]], "createutility() (pami.extras.df2db.densedf2dbplus.denseformatdfplus method)": [[8, "PAMI.extras.DF2DB.denseDF2DBPlus.DenseFormatDFPlus.createUtility"]], "createutility() (pami.extras.df2db.densedf2db_dump.denseformatdf method)": [[8, "PAMI.extras.DF2DB.denseDF2DB_dump.DenseFormatDF.createUtility"]], "createutility() (pami.extras.df2db.sparsedf2dbplus.sparseformatdfplus method)": [[8, "PAMI.extras.DF2DB.sparseDF2DBPlus.SparseFormatDFPlus.createUtility"]], "getfilename() (pami.extras.df2db.denseformatdf.denseformatdf method)": [[8, "PAMI.extras.DF2DB.DenseFormatDF.DenseFormatDF.getFileName"]], "getfilename() (pami.extras.df2db.sparseformatdf.sparseformatdf method)": [[8, "PAMI.extras.DF2DB.SparseFormatDF.SparseFormatDF.getFileName"]], "getfilename() (pami.extras.df2db.densedf2dbplus.denseformatdfplus method)": [[8, "PAMI.extras.DF2DB.denseDF2DBPlus.DenseFormatDFPlus.getFileName"]], "getfilename() (pami.extras.df2db.densedf2db_dump.denseformatdf method)": [[8, "PAMI.extras.DF2DB.denseDF2DB_dump.DenseFormatDF.getFileName"]], "getfilename() (pami.extras.df2db.sparsedf2dbplus.sparseformatdfplus method)": [[8, "PAMI.extras.DF2DB.sparseDF2DBPlus.SparseFormatDFPlus.getFileName"]], "gettemporaldatabase() (pami.extras.df2db.df2db.df2db method)": [[8, "PAMI.extras.DF2DB.DF2DB.DF2DB.getTemporalDatabase"]], "gettransactionaldatabase() (pami.extras.df2db.df2db.df2db method)": [[8, "PAMI.extras.DF2DB.DF2DB.DF2DB.getTransactionalDatabase"]], "getutilitydatabase() (pami.extras.df2db.df2db.df2db method)": [[8, "PAMI.extras.DF2DB.DF2DB.DF2DB.getUtilityDatabase"]], "save() (pami.extras.df2db.createtdb.createtdb method)": [[8, "PAMI.extras.DF2DB.createTDB.createTDB.save"]], "pami.extras.calculatemisvalues": [[9, "module-PAMI.extras.calculateMISValues"]], "pami.extras.calculatemisvalues.usingbeta": [[9, "module-PAMI.extras.calculateMISValues.usingBeta"]], "pami.extras.calculatemisvalues.usingsd": [[9, "module-PAMI.extras.calculateMISValues.usingSD"]], "calculatemis() (pami.extras.calculatemisvalues.usingbeta.usingbeta method)": [[9, "PAMI.extras.calculateMISValues.usingBeta.usingBeta.calculateMIS"]], "calculatemis() (pami.extras.calculatemisvalues.usingsd.usingsd method)": [[9, "PAMI.extras.calculateMISValues.usingSD.usingSD.calculateMIS"]], "getdataframe() (pami.extras.calculatemisvalues.usingsd.usingsd method)": [[9, "PAMI.extras.calculateMISValues.usingSD.usingSD.getDataFrame"]], "getmisdataframe() (pami.extras.calculatemisvalues.usingbeta.usingbeta method)": [[9, "PAMI.extras.calculateMISValues.usingBeta.usingBeta.getMISDataFrame"]], "save() (pami.extras.calculatemisvalues.usingbeta.usingbeta method)": [[9, "PAMI.extras.calculateMISValues.usingBeta.usingBeta.save"]], "save() (pami.extras.calculatemisvalues.usingsd.usingsd method)": [[9, "PAMI.extras.calculateMISValues.usingSD.usingSD.save"]], "usingbeta 
(class in pami.extras.calculatemisvalues.usingbeta)": [[9, "PAMI.extras.calculateMISValues.usingBeta.usingBeta"]], "usingsd (class in pami.extras.calculatemisvalues.usingsd)": [[9, "PAMI.extras.calculateMISValues.usingSD.usingSD"]], "fuzzydatabase (class in pami.extras.dbstats.fuzzydatabase)": [[10, "PAMI.extras.dbStats.FuzzyDatabase.FuzzyDatabase"]], "multipletimeseriesfuzzydatabasestats (class in pami.extras.dbstats.multipletimeseriesfuzzydatabasestats)": [[10, "PAMI.extras.dbStats.MultipleTimeSeriesFuzzyDatabaseStats.MultipleTimeSeriesFuzzyDatabaseStats"]], "pami.extras.dbstats": [[10, "module-PAMI.extras.dbStats"]], "pami.extras.dbstats.fuzzydatabase": [[10, "module-PAMI.extras.dbStats.FuzzyDatabase"]], "pami.extras.dbstats.multipletimeseriesfuzzydatabasestats": [[10, "module-PAMI.extras.dbStats.MultipleTimeSeriesFuzzyDatabaseStats"]], "pami.extras.dbstats.sequentialdatabase": [[10, "module-PAMI.extras.dbStats.SequentialDatabase"]], "pami.extras.dbstats.temporaldatabase": [[10, "module-PAMI.extras.dbStats.TemporalDatabase"]], "pami.extras.dbstats.transactionaldatabase": [[10, "module-PAMI.extras.dbStats.TransactionalDatabase"]], "pami.extras.dbstats.uncertaintemporaldatabase": [[10, "module-PAMI.extras.dbStats.UncertainTemporalDatabase"]], "pami.extras.dbstats.uncertaintransactionaldatabase": [[10, "module-PAMI.extras.dbStats.UncertainTransactionalDatabase"]], "pami.extras.dbstats.utilitydatabase": [[10, "module-PAMI.extras.dbStats.UtilityDatabase"]], "sequentialdatabase (class in pami.extras.dbstats.sequentialdatabase)": [[10, "PAMI.extras.dbStats.SequentialDatabase.SequentialDatabase"]], "temporaldatabase (class in pami.extras.dbstats.temporaldatabase)": [[10, "PAMI.extras.dbStats.TemporalDatabase.TemporalDatabase"]], "transactionaldatabase (class in pami.extras.dbstats.transactionaldatabase)": [[10, "PAMI.extras.dbStats.TransactionalDatabase.TransactionalDatabase"]], "uncertaintemporaldatabase (class in pami.extras.dbstats.uncertaintemporaldatabase)": [[10, "PAMI.extras.dbStats.UncertainTemporalDatabase.UncertainTemporalDatabase"]], "uncertaintransactionaldatabase (class in pami.extras.dbstats.uncertaintransactionaldatabase)": [[10, "PAMI.extras.dbStats.UncertainTransactionalDatabase.UncertainTransactionalDatabase"]], "utilitydatabase (class in pami.extras.dbstats.utilitydatabase)": [[10, "PAMI.extras.dbStats.UtilityDatabase.UtilityDatabase"]], "convertdataintomatrix() (pami.extras.dbstats.multipletimeseriesfuzzydatabasestats.multipletimeseriesfuzzydatabasestats method)": [[10, "PAMI.extras.dbStats.MultipleTimeSeriesFuzzyDatabaseStats.MultipleTimeSeriesFuzzyDatabaseStats.convertDataIntoMatrix"]], "convertdataintomatrix() (pami.extras.dbstats.temporaldatabase.temporaldatabase method)": [[10, "PAMI.extras.dbStats.TemporalDatabase.TemporalDatabase.convertDataIntoMatrix"]], "convertdataintomatrix() (pami.extras.dbstats.transactionaldatabase.transactionaldatabase method)": [[10, "PAMI.extras.dbStats.TransactionalDatabase.TransactionalDatabase.convertDataIntoMatrix"]], "convertdataintomatrix() (pami.extras.dbstats.uncertaintemporaldatabase.uncertaintemporaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTemporalDatabase.UncertainTemporalDatabase.convertDataIntoMatrix"]], "convertdataintomatrix() (pami.extras.dbstats.uncertaintransactionaldatabase.uncertaintransactionaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTransactionalDatabase.UncertainTransactionalDatabase.convertDataIntoMatrix"]], "creatingitemsets() (pami.extras.dbstats.fuzzydatabase.fuzzydatabase method)": 
[[10, "PAMI.extras.dbStats.FuzzyDatabase.FuzzyDatabase.creatingItemSets"]], "creatingitemsets() (pami.extras.dbstats.utilitydatabase.utilitydatabase method)": [[10, "PAMI.extras.dbStats.UtilityDatabase.UtilityDatabase.creatingItemSets"]], "getaverageinterarrivalperiod() (pami.extras.dbstats.temporaldatabase.temporaldatabase method)": [[10, "PAMI.extras.dbStats.TemporalDatabase.TemporalDatabase.getAverageInterArrivalPeriod"]], "getaverageitempersequencelength() (pami.extras.dbstats.sequentialdatabase.sequentialdatabase method)": [[10, "PAMI.extras.dbStats.SequentialDatabase.SequentialDatabase.getAverageItemPerSequenceLength"]], "getaverageitempersubsequencelength() (pami.extras.dbstats.sequentialdatabase.sequentialdatabase method)": [[10, "PAMI.extras.dbStats.SequentialDatabase.SequentialDatabase.getAverageItemPerSubsequenceLength"]], "getaverageperiod() (pami.extras.dbstats.uncertaintemporaldatabase.uncertaintemporaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTemporalDatabase.UncertainTemporalDatabase.getAveragePeriod"]], "getaverageperiodofitem() (pami.extras.dbstats.temporaldatabase.temporaldatabase method)": [[10, "PAMI.extras.dbStats.TemporalDatabase.TemporalDatabase.getAveragePeriodOfItem"]], "getaveragesubsequencepersequencelength() (pami.extras.dbstats.sequentialdatabase.sequentialdatabase method)": [[10, "PAMI.extras.dbStats.SequentialDatabase.SequentialDatabase.getAverageSubsequencePerSequenceLength"]], "getaveragetransactionlength() (pami.extras.dbstats.fuzzydatabase.fuzzydatabase method)": [[10, "PAMI.extras.dbStats.FuzzyDatabase.FuzzyDatabase.getAverageTransactionLength"]], "getaveragetransactionlength() (pami.extras.dbstats.multipletimeseriesfuzzydatabasestats.multipletimeseriesfuzzydatabasestats method)": [[10, "PAMI.extras.dbStats.MultipleTimeSeriesFuzzyDatabaseStats.MultipleTimeSeriesFuzzyDatabaseStats.getAverageTransactionLength"]], "getaveragetransactionlength() (pami.extras.dbstats.temporaldatabase.temporaldatabase method)": [[10, "PAMI.extras.dbStats.TemporalDatabase.TemporalDatabase.getAverageTransactionLength"]], "getaveragetransactionlength() (pami.extras.dbstats.transactionaldatabase.transactionaldatabase method)": [[10, "PAMI.extras.dbStats.TransactionalDatabase.TransactionalDatabase.getAverageTransactionLength"]], "getaveragetransactionlength() (pami.extras.dbstats.uncertaintemporaldatabase.uncertaintemporaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTemporalDatabase.UncertainTemporalDatabase.getAverageTransactionLength"]], "getaveragetransactionlength() (pami.extras.dbstats.uncertaintransactionaldatabase.uncertaintransactionaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTransactionalDatabase.UncertainTransactionalDatabase.getAverageTransactionLength"]], "getaveragetransactionlength() (pami.extras.dbstats.utilitydatabase.utilitydatabase method)": [[10, "PAMI.extras.dbStats.UtilityDatabase.UtilityDatabase.getAverageTransactionLength"]], "getaverageutility() (pami.extras.dbstats.fuzzydatabase.fuzzydatabase method)": [[10, "PAMI.extras.dbStats.FuzzyDatabase.FuzzyDatabase.getAverageUtility"]], "getaverageutility() (pami.extras.dbstats.utilitydatabase.utilitydatabase method)": [[10, "PAMI.extras.dbStats.UtilityDatabase.UtilityDatabase.getAverageUtility"]], "getdatabasesize() (pami.extras.dbstats.fuzzydatabase.fuzzydatabase method)": [[10, "PAMI.extras.dbStats.FuzzyDatabase.FuzzyDatabase.getDatabaseSize"]], "getdatabasesize() (pami.extras.dbstats.multipletimeseriesfuzzydatabasestats.multipletimeseriesfuzzydatabasestats method)": [[10, 
"PAMI.extras.dbStats.MultipleTimeSeriesFuzzyDatabaseStats.MultipleTimeSeriesFuzzyDatabaseStats.getDatabaseSize"]], "getdatabasesize() (pami.extras.dbstats.sequentialdatabase.sequentialdatabase method)": [[10, "PAMI.extras.dbStats.SequentialDatabase.SequentialDatabase.getDatabaseSize"]], "getdatabasesize() (pami.extras.dbstats.temporaldatabase.temporaldatabase method)": [[10, "PAMI.extras.dbStats.TemporalDatabase.TemporalDatabase.getDatabaseSize"]], "getdatabasesize() (pami.extras.dbstats.transactionaldatabase.transactionaldatabase method)": [[10, "PAMI.extras.dbStats.TransactionalDatabase.TransactionalDatabase.getDatabaseSize"]], "getdatabasesize() (pami.extras.dbstats.uncertaintemporaldatabase.uncertaintemporaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTemporalDatabase.UncertainTemporalDatabase.getDatabaseSize"]], "getdatabasesize() (pami.extras.dbstats.uncertaintransactionaldatabase.uncertaintransactionaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTransactionalDatabase.UncertainTransactionalDatabase.getDatabaseSize"]], "getdatabasesize() (pami.extras.dbstats.utilitydatabase.utilitydatabase method)": [[10, "PAMI.extras.dbStats.UtilityDatabase.UtilityDatabase.getDatabaseSize"]], "getdensity() (pami.extras.dbstats.multipletimeseriesfuzzydatabasestats.multipletimeseriesfuzzydatabasestats method)": [[10, "PAMI.extras.dbStats.MultipleTimeSeriesFuzzyDatabaseStats.MultipleTimeSeriesFuzzyDatabaseStats.getDensity"]], "getdensity() (pami.extras.dbstats.temporaldatabase.temporaldatabase method)": [[10, "PAMI.extras.dbStats.TemporalDatabase.TemporalDatabase.getDensity"]], "getdensity() (pami.extras.dbstats.transactionaldatabase.transactionaldatabase method)": [[10, "PAMI.extras.dbStats.TransactionalDatabase.TransactionalDatabase.getDensity"]], "getdensity() (pami.extras.dbstats.uncertaintemporaldatabase.uncertaintemporaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTemporalDatabase.UncertainTemporalDatabase.getDensity"]], "getdensity() (pami.extras.dbstats.uncertaintransactionaldatabase.uncertaintransactionaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTransactionalDatabase.UncertainTransactionalDatabase.getDensity"]], "getfrequenciesinrange() (pami.extras.dbstats.fuzzydatabase.fuzzydatabase method)": [[10, "PAMI.extras.dbStats.FuzzyDatabase.FuzzyDatabase.getFrequenciesInRange"]], "getfrequenciesinrange() (pami.extras.dbstats.multipletimeseriesfuzzydatabasestats.multipletimeseriesfuzzydatabasestats method)": [[10, "PAMI.extras.dbStats.MultipleTimeSeriesFuzzyDatabaseStats.MultipleTimeSeriesFuzzyDatabaseStats.getFrequenciesInRange"]], "getfrequenciesinrange() (pami.extras.dbstats.sequentialdatabase.sequentialdatabase method)": [[10, "PAMI.extras.dbStats.SequentialDatabase.SequentialDatabase.getFrequenciesInRange"]], "getfrequenciesinrange() (pami.extras.dbstats.temporaldatabase.temporaldatabase method)": [[10, "PAMI.extras.dbStats.TemporalDatabase.TemporalDatabase.getFrequenciesInRange"]], "getfrequenciesinrange() (pami.extras.dbstats.transactionaldatabase.transactionaldatabase method)": [[10, "PAMI.extras.dbStats.TransactionalDatabase.TransactionalDatabase.getFrequenciesInRange"]], "getfrequenciesinrange() (pami.extras.dbstats.uncertaintemporaldatabase.uncertaintemporaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTemporalDatabase.UncertainTemporalDatabase.getFrequenciesInRange"]], "getfrequenciesinrange() (pami.extras.dbstats.uncertaintransactionaldatabase.uncertaintransactionaldatabase method)": [[10, 
"PAMI.extras.dbStats.UncertainTransactionalDatabase.UncertainTransactionalDatabase.getFrequenciesInRange"]], "getfrequenciesinrange() (pami.extras.dbstats.utilitydatabase.utilitydatabase method)": [[10, "PAMI.extras.dbStats.UtilityDatabase.UtilityDatabase.getFrequenciesInRange"]], "getmaximuminterarrivalperiod() (pami.extras.dbstats.temporaldatabase.temporaldatabase method)": [[10, "PAMI.extras.dbStats.TemporalDatabase.TemporalDatabase.getMaximumInterArrivalPeriod"]], "getmaximumperiod() (pami.extras.dbstats.uncertaintemporaldatabase.uncertaintemporaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTemporalDatabase.UncertainTemporalDatabase.getMaximumPeriod"]], "getmaximumperiodofitem() (pami.extras.dbstats.temporaldatabase.temporaldatabase method)": [[10, "PAMI.extras.dbStats.TemporalDatabase.TemporalDatabase.getMaximumPeriodOfItem"]], "getmaximumsequencelength() (pami.extras.dbstats.sequentialdatabase.sequentialdatabase method)": [[10, "PAMI.extras.dbStats.SequentialDatabase.SequentialDatabase.getMaximumSequenceLength"]], "getmaximumsubsequencelength() (pami.extras.dbstats.sequentialdatabase.sequentialdatabase method)": [[10, "PAMI.extras.dbStats.SequentialDatabase.SequentialDatabase.getMaximumSubsequenceLength"]], "getmaximumtransactionlength() (pami.extras.dbstats.fuzzydatabase.fuzzydatabase method)": [[10, "PAMI.extras.dbStats.FuzzyDatabase.FuzzyDatabase.getMaximumTransactionLength"]], "getmaximumtransactionlength() (pami.extras.dbstats.multipletimeseriesfuzzydatabasestats.multipletimeseriesfuzzydatabasestats method)": [[10, "PAMI.extras.dbStats.MultipleTimeSeriesFuzzyDatabaseStats.MultipleTimeSeriesFuzzyDatabaseStats.getMaximumTransactionLength"]], "getmaximumtransactionlength() (pami.extras.dbstats.temporaldatabase.temporaldatabase method)": [[10, "PAMI.extras.dbStats.TemporalDatabase.TemporalDatabase.getMaximumTransactionLength"]], "getmaximumtransactionlength() (pami.extras.dbstats.transactionaldatabase.transactionaldatabase method)": [[10, "PAMI.extras.dbStats.TransactionalDatabase.TransactionalDatabase.getMaximumTransactionLength"]], "getmaximumtransactionlength() (pami.extras.dbstats.uncertaintemporaldatabase.uncertaintemporaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTemporalDatabase.UncertainTemporalDatabase.getMaximumTransactionLength"]], "getmaximumtransactionlength() (pami.extras.dbstats.uncertaintransactionaldatabase.uncertaintransactionaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTransactionalDatabase.UncertainTransactionalDatabase.getMaximumTransactionLength"]], "getmaximumtransactionlength() (pami.extras.dbstats.utilitydatabase.utilitydatabase method)": [[10, "PAMI.extras.dbStats.UtilityDatabase.UtilityDatabase.getMaximumTransactionLength"]], "getmaximumutility() (pami.extras.dbstats.fuzzydatabase.fuzzydatabase method)": [[10, "PAMI.extras.dbStats.FuzzyDatabase.FuzzyDatabase.getMaximumUtility"]], "getmaximumutility() (pami.extras.dbstats.utilitydatabase.utilitydatabase method)": [[10, "PAMI.extras.dbStats.UtilityDatabase.UtilityDatabase.getMaximumUtility"]], "getminimuminterarrivalperiod() (pami.extras.dbstats.temporaldatabase.temporaldatabase method)": [[10, "PAMI.extras.dbStats.TemporalDatabase.TemporalDatabase.getMinimumInterArrivalPeriod"]], "getminimumperiod() (pami.extras.dbstats.uncertaintemporaldatabase.uncertaintemporaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTemporalDatabase.UncertainTemporalDatabase.getMinimumPeriod"]], "getminimumperiodofitem() (pami.extras.dbstats.temporaldatabase.temporaldatabase method)": [[10, 
"PAMI.extras.dbStats.TemporalDatabase.TemporalDatabase.getMinimumPeriodOfItem"]], "getminimumsequencelength() (pami.extras.dbstats.sequentialdatabase.sequentialdatabase method)": [[10, "PAMI.extras.dbStats.SequentialDatabase.SequentialDatabase.getMinimumSequenceLength"]], "getminimumsubsequencelength() (pami.extras.dbstats.sequentialdatabase.sequentialdatabase method)": [[10, "PAMI.extras.dbStats.SequentialDatabase.SequentialDatabase.getMinimumSubsequenceLength"]], "getminimumtransactionlength() (pami.extras.dbstats.fuzzydatabase.fuzzydatabase method)": [[10, "PAMI.extras.dbStats.FuzzyDatabase.FuzzyDatabase.getMinimumTransactionLength"]], "getminimumtransactionlength() (pami.extras.dbstats.multipletimeseriesfuzzydatabasestats.multipletimeseriesfuzzydatabasestats method)": [[10, "PAMI.extras.dbStats.MultipleTimeSeriesFuzzyDatabaseStats.MultipleTimeSeriesFuzzyDatabaseStats.getMinimumTransactionLength"]], "getminimumtransactionlength() (pami.extras.dbstats.temporaldatabase.temporaldatabase method)": [[10, "PAMI.extras.dbStats.TemporalDatabase.TemporalDatabase.getMinimumTransactionLength"]], "getminimumtransactionlength() (pami.extras.dbstats.transactionaldatabase.transactionaldatabase method)": [[10, "PAMI.extras.dbStats.TransactionalDatabase.TransactionalDatabase.getMinimumTransactionLength"]], "getminimumtransactionlength() (pami.extras.dbstats.uncertaintemporaldatabase.uncertaintemporaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTemporalDatabase.UncertainTemporalDatabase.getMinimumTransactionLength"]], "getminimumtransactionlength() (pami.extras.dbstats.uncertaintransactionaldatabase.uncertaintransactionaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTransactionalDatabase.UncertainTransactionalDatabase.getMinimumTransactionLength"]], "getminimumtransactionlength() (pami.extras.dbstats.utilitydatabase.utilitydatabase method)": [[10, "PAMI.extras.dbStats.UtilityDatabase.UtilityDatabase.getMinimumTransactionLength"]], "getminimumutility() (pami.extras.dbstats.fuzzydatabase.fuzzydatabase method)": [[10, "PAMI.extras.dbStats.FuzzyDatabase.FuzzyDatabase.getMinimumUtility"]], "getminimumutility() (pami.extras.dbstats.utilitydatabase.utilitydatabase method)": [[10, "PAMI.extras.dbStats.UtilityDatabase.UtilityDatabase.getMinimumUtility"]], "getnumberofitems() (pami.extras.dbstats.fuzzydatabase.fuzzydatabase method)": [[10, "PAMI.extras.dbStats.FuzzyDatabase.FuzzyDatabase.getNumberOfItems"]], "getnumberofitems() (pami.extras.dbstats.multipletimeseriesfuzzydatabasestats.multipletimeseriesfuzzydatabasestats method)": [[10, "PAMI.extras.dbStats.MultipleTimeSeriesFuzzyDatabaseStats.MultipleTimeSeriesFuzzyDatabaseStats.getNumberOfItems"]], "getnumberofitems() (pami.extras.dbstats.transactionaldatabase.transactionaldatabase method)": [[10, "PAMI.extras.dbStats.TransactionalDatabase.TransactionalDatabase.getNumberOfItems"]], "getnumberofitems() (pami.extras.dbstats.uncertaintransactionaldatabase.uncertaintransactionaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTransactionalDatabase.UncertainTransactionalDatabase.getNumberOfItems"]], "getnumberofitems() (pami.extras.dbstats.utilitydatabase.utilitydatabase method)": [[10, "PAMI.extras.dbStats.UtilityDatabase.UtilityDatabase.getNumberOfItems"]], "getnumberoftransactionspertimestamp() (pami.extras.dbstats.temporaldatabase.temporaldatabase method)": [[10, "PAMI.extras.dbStats.TemporalDatabase.TemporalDatabase.getNumberOfTransactionsPerTimestamp"]], "getnumberoftransactionspertimestamp() 
(pami.extras.dbstats.uncertaintemporaldatabase.uncertaintemporaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTemporalDatabase.UncertainTemporalDatabase.getNumberOfTransactionsPerTimestamp"]], "getperiodsinrange() (pami.extras.dbstats.temporaldatabase.temporaldatabase method)": [[10, "PAMI.extras.dbStats.TemporalDatabase.TemporalDatabase.getPeriodsInRange"]], "getsequencesize() (pami.extras.dbstats.sequentialdatabase.sequentialdatabase method)": [[10, "PAMI.extras.dbStats.SequentialDatabase.SequentialDatabase.getSequenceSize"]], "getsequenciallengthdistribution() (pami.extras.dbstats.sequentialdatabase.sequentialdatabase method)": [[10, "PAMI.extras.dbStats.SequentialDatabase.SequentialDatabase.getSequencialLengthDistribution"]], "getsortedlistofitemfrequencies() (pami.extras.dbstats.fuzzydatabase.fuzzydatabase method)": [[10, "PAMI.extras.dbStats.FuzzyDatabase.FuzzyDatabase.getSortedListOfItemFrequencies"]], "getsortedlistofitemfrequencies() (pami.extras.dbstats.multipletimeseriesfuzzydatabasestats.multipletimeseriesfuzzydatabasestats method)": [[10, "PAMI.extras.dbStats.MultipleTimeSeriesFuzzyDatabaseStats.MultipleTimeSeriesFuzzyDatabaseStats.getSortedListOfItemFrequencies"]], "getsortedlistofitemfrequencies() (pami.extras.dbstats.sequentialdatabase.sequentialdatabase method)": [[10, "PAMI.extras.dbStats.SequentialDatabase.SequentialDatabase.getSortedListOfItemFrequencies"]], "getsortedlistofitemfrequencies() (pami.extras.dbstats.temporaldatabase.temporaldatabase method)": [[10, "PAMI.extras.dbStats.TemporalDatabase.TemporalDatabase.getSortedListOfItemFrequencies"]], "getsortedlistofitemfrequencies() (pami.extras.dbstats.transactionaldatabase.transactionaldatabase method)": [[10, "PAMI.extras.dbStats.TransactionalDatabase.TransactionalDatabase.getSortedListOfItemFrequencies"]], "getsortedlistofitemfrequencies() (pami.extras.dbstats.uncertaintemporaldatabase.uncertaintemporaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTemporalDatabase.UncertainTemporalDatabase.getSortedListOfItemFrequencies"]], "getsortedlistofitemfrequencies() (pami.extras.dbstats.uncertaintransactionaldatabase.uncertaintransactionaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTransactionalDatabase.UncertainTransactionalDatabase.getSortedListOfItemFrequencies"]], "getsortedlistofitemfrequencies() (pami.extras.dbstats.utilitydatabase.utilitydatabase method)": [[10, "PAMI.extras.dbStats.UtilityDatabase.UtilityDatabase.getSortedListOfItemFrequencies"]], "getsortedutilityvaluesofitem() (pami.extras.dbstats.fuzzydatabase.fuzzydatabase method)": [[10, "PAMI.extras.dbStats.FuzzyDatabase.FuzzyDatabase.getSortedUtilityValuesOfItem"]], "getsortedutilityvaluesofitem() (pami.extras.dbstats.utilitydatabase.utilitydatabase method)": [[10, "PAMI.extras.dbStats.UtilityDatabase.UtilityDatabase.getSortedUtilityValuesOfItem"]], "getsparsity() (pami.extras.dbstats.fuzzydatabase.fuzzydatabase method)": [[10, "PAMI.extras.dbStats.FuzzyDatabase.FuzzyDatabase.getSparsity"]], "getsparsity() (pami.extras.dbstats.multipletimeseriesfuzzydatabasestats.multipletimeseriesfuzzydatabasestats method)": [[10, "PAMI.extras.dbStats.MultipleTimeSeriesFuzzyDatabaseStats.MultipleTimeSeriesFuzzyDatabaseStats.getSparsity"]], "getsparsity() (pami.extras.dbstats.temporaldatabase.temporaldatabase method)": [[10, "PAMI.extras.dbStats.TemporalDatabase.TemporalDatabase.getSparsity"]], "getsparsity() (pami.extras.dbstats.transactionaldatabase.transactionaldatabase method)": [[10, 
"PAMI.extras.dbStats.TransactionalDatabase.TransactionalDatabase.getSparsity"]], "getsparsity() (pami.extras.dbstats.uncertaintemporaldatabase.uncertaintemporaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTemporalDatabase.UncertainTemporalDatabase.getSparsity"]], "getsparsity() (pami.extras.dbstats.uncertaintransactionaldatabase.uncertaintransactionaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTransactionalDatabase.UncertainTransactionalDatabase.getSparsity"]], "getsparsity() (pami.extras.dbstats.utilitydatabase.utilitydatabase method)": [[10, "PAMI.extras.dbStats.UtilityDatabase.UtilityDatabase.getSparsity"]], "getstandarddeviationperiod() (pami.extras.dbstats.temporaldatabase.temporaldatabase method)": [[10, "PAMI.extras.dbStats.TemporalDatabase.TemporalDatabase.getStandardDeviationPeriod"]], "getstandarddeviationperiod() (pami.extras.dbstats.uncertaintemporaldatabase.uncertaintemporaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTemporalDatabase.UncertainTemporalDatabase.getStandardDeviationPeriod"]], "getstandarddeviationsequencelength() (pami.extras.dbstats.sequentialdatabase.sequentialdatabase method)": [[10, "PAMI.extras.dbStats.SequentialDatabase.SequentialDatabase.getStandardDeviationSequenceLength"]], "getstandarddeviationsubsequencelength() (pami.extras.dbstats.sequentialdatabase.sequentialdatabase method)": [[10, "PAMI.extras.dbStats.SequentialDatabase.SequentialDatabase.getStandardDeviationSubsequenceLength"]], "getstandarddeviationtransactionlength() (pami.extras.dbstats.fuzzydatabase.fuzzydatabase method)": [[10, "PAMI.extras.dbStats.FuzzyDatabase.FuzzyDatabase.getStandardDeviationTransactionLength"]], "getstandarddeviationtransactionlength() (pami.extras.dbstats.multipletimeseriesfuzzydatabasestats.multipletimeseriesfuzzydatabasestats method)": [[10, "PAMI.extras.dbStats.MultipleTimeSeriesFuzzyDatabaseStats.MultipleTimeSeriesFuzzyDatabaseStats.getStandardDeviationTransactionLength"]], "getstandarddeviationtransactionlength() (pami.extras.dbstats.temporaldatabase.temporaldatabase method)": [[10, "PAMI.extras.dbStats.TemporalDatabase.TemporalDatabase.getStandardDeviationTransactionLength"]], "getstandarddeviationtransactionlength() (pami.extras.dbstats.transactionaldatabase.transactionaldatabase method)": [[10, "PAMI.extras.dbStats.TransactionalDatabase.TransactionalDatabase.getStandardDeviationTransactionLength"]], "getstandarddeviationtransactionlength() (pami.extras.dbstats.uncertaintemporaldatabase.uncertaintemporaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTemporalDatabase.UncertainTemporalDatabase.getStandardDeviationTransactionLength"]], "getstandarddeviationtransactionlength() (pami.extras.dbstats.uncertaintransactionaldatabase.uncertaintransactionaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTransactionalDatabase.UncertainTransactionalDatabase.getStandardDeviationTransactionLength"]], "getstandarddeviationtransactionlength() (pami.extras.dbstats.utilitydatabase.utilitydatabase method)": [[10, "PAMI.extras.dbStats.UtilityDatabase.UtilityDatabase.getStandardDeviationTransactionLength"]], "getsubsequenciallengthdistribution() (pami.extras.dbstats.sequentialdatabase.sequentialdatabase method)": [[10, "PAMI.extras.dbStats.SequentialDatabase.SequentialDatabase.getSubsequencialLengthDistribution"]], "gettotalnumberofitems() (pami.extras.dbstats.fuzzydatabase.fuzzydatabase method)": [[10, "PAMI.extras.dbStats.FuzzyDatabase.FuzzyDatabase.getTotalNumberOfItems"]], "gettotalnumberofitems() 
(pami.extras.dbstats.multipletimeseriesfuzzydatabasestats.multipletimeseriesfuzzydatabasestats method)": [[10, "PAMI.extras.dbStats.MultipleTimeSeriesFuzzyDatabaseStats.MultipleTimeSeriesFuzzyDatabaseStats.getTotalNumberOfItems"]], "gettotalnumberofitems() (pami.extras.dbstats.sequentialdatabase.sequentialdatabase method)": [[10, "PAMI.extras.dbStats.SequentialDatabase.SequentialDatabase.getTotalNumberOfItems"]], "gettotalnumberofitems() (pami.extras.dbstats.temporaldatabase.temporaldatabase method)": [[10, "PAMI.extras.dbStats.TemporalDatabase.TemporalDatabase.getTotalNumberOfItems"]], "gettotalnumberofitems() (pami.extras.dbstats.transactionaldatabase.transactionaldatabase method)": [[10, "PAMI.extras.dbStats.TransactionalDatabase.TransactionalDatabase.getTotalNumberOfItems"]], "gettotalnumberofitems() (pami.extras.dbstats.uncertaintemporaldatabase.uncertaintemporaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTemporalDatabase.UncertainTemporalDatabase.getTotalNumberOfItems"]], "gettotalnumberofitems() (pami.extras.dbstats.uncertaintransactionaldatabase.uncertaintransactionaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTransactionalDatabase.UncertainTransactionalDatabase.getTotalNumberOfItems"]], "gettotalnumberofitems() (pami.extras.dbstats.utilitydatabase.utilitydatabase method)": [[10, "PAMI.extras.dbStats.UtilityDatabase.UtilityDatabase.getTotalNumberOfItems"]], "gettotalutility() (pami.extras.dbstats.fuzzydatabase.fuzzydatabase method)": [[10, "PAMI.extras.dbStats.FuzzyDatabase.FuzzyDatabase.getTotalUtility"]], "gettotalutility() (pami.extras.dbstats.utilitydatabase.utilitydatabase method)": [[10, "PAMI.extras.dbStats.UtilityDatabase.UtilityDatabase.getTotalUtility"]], "gettransanctionallengthdistribution() (pami.extras.dbstats.fuzzydatabase.fuzzydatabase method)": [[10, "PAMI.extras.dbStats.FuzzyDatabase.FuzzyDatabase.getTransanctionalLengthDistribution"]], "gettransanctionallengthdistribution() (pami.extras.dbstats.multipletimeseriesfuzzydatabasestats.multipletimeseriesfuzzydatabasestats method)": [[10, "PAMI.extras.dbStats.MultipleTimeSeriesFuzzyDatabaseStats.MultipleTimeSeriesFuzzyDatabaseStats.getTransanctionalLengthDistribution"]], "gettransanctionallengthdistribution() (pami.extras.dbstats.temporaldatabase.temporaldatabase method)": [[10, "PAMI.extras.dbStats.TemporalDatabase.TemporalDatabase.getTransanctionalLengthDistribution"]], "gettransanctionallengthdistribution() (pami.extras.dbstats.transactionaldatabase.transactionaldatabase method)": [[10, "PAMI.extras.dbStats.TransactionalDatabase.TransactionalDatabase.getTransanctionalLengthDistribution"]], "gettransanctionallengthdistribution() (pami.extras.dbstats.uncertaintemporaldatabase.uncertaintemporaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTemporalDatabase.UncertainTemporalDatabase.getTransanctionalLengthDistribution"]], "gettransanctionallengthdistribution() (pami.extras.dbstats.uncertaintransactionaldatabase.uncertaintransactionaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTransactionalDatabase.UncertainTransactionalDatabase.getTransanctionalLengthDistribution"]], "gettransanctionallengthdistribution() (pami.extras.dbstats.utilitydatabase.utilitydatabase method)": [[10, "PAMI.extras.dbStats.UtilityDatabase.UtilityDatabase.getTransanctionalLengthDistribution"]], "getvariancesequencelength() (pami.extras.dbstats.sequentialdatabase.sequentialdatabase method)": [[10, "PAMI.extras.dbStats.SequentialDatabase.SequentialDatabase.getVarianceSequenceLength"]], 
"getvariancesubsequencelength() (pami.extras.dbstats.sequentialdatabase.sequentialdatabase method)": [[10, "PAMI.extras.dbStats.SequentialDatabase.SequentialDatabase.getVarianceSubsequenceLength"]], "getvariancetransactionlength() (pami.extras.dbstats.fuzzydatabase.fuzzydatabase method)": [[10, "PAMI.extras.dbStats.FuzzyDatabase.FuzzyDatabase.getVarianceTransactionLength"]], "getvariancetransactionlength() (pami.extras.dbstats.multipletimeseriesfuzzydatabasestats.multipletimeseriesfuzzydatabasestats method)": [[10, "PAMI.extras.dbStats.MultipleTimeSeriesFuzzyDatabaseStats.MultipleTimeSeriesFuzzyDatabaseStats.getVarianceTransactionLength"]], "getvariancetransactionlength() (pami.extras.dbstats.temporaldatabase.temporaldatabase method)": [[10, "PAMI.extras.dbStats.TemporalDatabase.TemporalDatabase.getVarianceTransactionLength"]], "getvariancetransactionlength() (pami.extras.dbstats.transactionaldatabase.transactionaldatabase method)": [[10, "PAMI.extras.dbStats.TransactionalDatabase.TransactionalDatabase.getVarianceTransactionLength"]], "getvariancetransactionlength() (pami.extras.dbstats.uncertaintemporaldatabase.uncertaintemporaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTemporalDatabase.UncertainTemporalDatabase.getVarianceTransactionLength"]], "getvariancetransactionlength() (pami.extras.dbstats.uncertaintransactionaldatabase.uncertaintransactionaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTransactionalDatabase.UncertainTransactionalDatabase.getVarianceTransactionLength"]], "getvariancetransactionlength() (pami.extras.dbstats.utilitydatabase.utilitydatabase method)": [[10, "PAMI.extras.dbStats.UtilityDatabase.UtilityDatabase.getVarianceTransactionLength"]], "plotgraphs() (pami.extras.dbstats.fuzzydatabase.fuzzydatabase method)": [[10, "PAMI.extras.dbStats.FuzzyDatabase.FuzzyDatabase.plotGraphs"]], "plotgraphs() (pami.extras.dbstats.multipletimeseriesfuzzydatabasestats.multipletimeseriesfuzzydatabasestats method)": [[10, "PAMI.extras.dbStats.MultipleTimeSeriesFuzzyDatabaseStats.MultipleTimeSeriesFuzzyDatabaseStats.plotGraphs"]], "plotgraphs() (pami.extras.dbstats.sequentialdatabase.sequentialdatabase method)": [[10, "PAMI.extras.dbStats.SequentialDatabase.SequentialDatabase.plotGraphs"]], "plotgraphs() (pami.extras.dbstats.temporaldatabase.temporaldatabase method)": [[10, "PAMI.extras.dbStats.TemporalDatabase.TemporalDatabase.plotGraphs"]], "plotgraphs() (pami.extras.dbstats.transactionaldatabase.transactionaldatabase method)": [[10, "PAMI.extras.dbStats.TransactionalDatabase.TransactionalDatabase.plotGraphs"]], "plotgraphs() (pami.extras.dbstats.uncertaintemporaldatabase.uncertaintemporaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTemporalDatabase.UncertainTemporalDatabase.plotGraphs"]], "plotgraphs() (pami.extras.dbstats.uncertaintransactionaldatabase.uncertaintransactionaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTransactionalDatabase.UncertainTransactionalDatabase.plotGraphs"]], "plotgraphs() (pami.extras.dbstats.utilitydatabase.utilitydatabase method)": [[10, "PAMI.extras.dbStats.UtilityDatabase.UtilityDatabase.plotGraphs"]], "printstats() (pami.extras.dbstats.fuzzydatabase.fuzzydatabase method)": [[10, "PAMI.extras.dbStats.FuzzyDatabase.FuzzyDatabase.printStats"]], "printstats() (pami.extras.dbstats.multipletimeseriesfuzzydatabasestats.multipletimeseriesfuzzydatabasestats method)": [[10, "PAMI.extras.dbStats.MultipleTimeSeriesFuzzyDatabaseStats.MultipleTimeSeriesFuzzyDatabaseStats.printStats"]], "printstats() 
(pami.extras.dbstats.sequentialdatabase.sequentialdatabase method)": [[10, "PAMI.extras.dbStats.SequentialDatabase.SequentialDatabase.printStats"]], "printstats() (pami.extras.dbstats.temporaldatabase.temporaldatabase method)": [[10, "PAMI.extras.dbStats.TemporalDatabase.TemporalDatabase.printStats"]], "printstats() (pami.extras.dbstats.transactionaldatabase.transactionaldatabase method)": [[10, "PAMI.extras.dbStats.TransactionalDatabase.TransactionalDatabase.printStats"]], "printstats() (pami.extras.dbstats.uncertaintemporaldatabase.uncertaintemporaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTemporalDatabase.UncertainTemporalDatabase.printStats"]], "printstats() (pami.extras.dbstats.uncertaintransactionaldatabase.uncertaintransactionaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTransactionalDatabase.UncertainTransactionalDatabase.printStats"]], "printstats() (pami.extras.dbstats.utilitydatabase.utilitydatabase method)": [[10, "PAMI.extras.dbStats.UtilityDatabase.UtilityDatabase.printStats"]], "readdatabase() (pami.extras.dbstats.fuzzydatabase.fuzzydatabase method)": [[10, "PAMI.extras.dbStats.FuzzyDatabase.FuzzyDatabase.readDatabase"]], "readdatabase() (pami.extras.dbstats.multipletimeseriesfuzzydatabasestats.multipletimeseriesfuzzydatabasestats method)": [[10, "PAMI.extras.dbStats.MultipleTimeSeriesFuzzyDatabaseStats.MultipleTimeSeriesFuzzyDatabaseStats.readDatabase"]], "readdatabase() (pami.extras.dbstats.sequentialdatabase.sequentialdatabase method)": [[10, "PAMI.extras.dbStats.SequentialDatabase.SequentialDatabase.readDatabase"]], "readdatabase() (pami.extras.dbstats.temporaldatabase.temporaldatabase method)": [[10, "PAMI.extras.dbStats.TemporalDatabase.TemporalDatabase.readDatabase"]], "readdatabase() (pami.extras.dbstats.transactionaldatabase.transactionaldatabase method)": [[10, "PAMI.extras.dbStats.TransactionalDatabase.TransactionalDatabase.readDatabase"]], "readdatabase() (pami.extras.dbstats.uncertaintemporaldatabase.uncertaintemporaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTemporalDatabase.UncertainTemporalDatabase.readDatabase"]], "readdatabase() (pami.extras.dbstats.uncertaintransactionaldatabase.uncertaintransactionaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTransactionalDatabase.UncertainTransactionalDatabase.readDatabase"]], "readdatabase() (pami.extras.dbstats.utilitydatabase.utilitydatabase method)": [[10, "PAMI.extras.dbStats.UtilityDatabase.UtilityDatabase.readDatabase"]], "run() (pami.extras.dbstats.fuzzydatabase.fuzzydatabase method)": [[10, "PAMI.extras.dbStats.FuzzyDatabase.FuzzyDatabase.run"]], "run() (pami.extras.dbstats.multipletimeseriesfuzzydatabasestats.multipletimeseriesfuzzydatabasestats method)": [[10, "PAMI.extras.dbStats.MultipleTimeSeriesFuzzyDatabaseStats.MultipleTimeSeriesFuzzyDatabaseStats.run"]], "run() (pami.extras.dbstats.sequentialdatabase.sequentialdatabase method)": [[10, "PAMI.extras.dbStats.SequentialDatabase.SequentialDatabase.run"]], "run() (pami.extras.dbstats.temporaldatabase.temporaldatabase method)": [[10, "PAMI.extras.dbStats.TemporalDatabase.TemporalDatabase.run"]], "run() (pami.extras.dbstats.transactionaldatabase.transactionaldatabase method)": [[10, "PAMI.extras.dbStats.TransactionalDatabase.TransactionalDatabase.run"]], "run() (pami.extras.dbstats.uncertaintemporaldatabase.uncertaintemporaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTemporalDatabase.UncertainTemporalDatabase.run"]], "run() 
(pami.extras.dbstats.uncertaintransactionaldatabase.uncertaintransactionaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTransactionalDatabase.UncertainTransactionalDatabase.run"]], "run() (pami.extras.dbstats.utilitydatabase.utilitydatabase method)": [[10, "PAMI.extras.dbStats.UtilityDatabase.UtilityDatabase.run"]], "save() (pami.extras.dbstats.fuzzydatabase.fuzzydatabase method)": [[10, "PAMI.extras.dbStats.FuzzyDatabase.FuzzyDatabase.save"]], "save() (pami.extras.dbstats.multipletimeseriesfuzzydatabasestats.multipletimeseriesfuzzydatabasestats method)": [[10, "PAMI.extras.dbStats.MultipleTimeSeriesFuzzyDatabaseStats.MultipleTimeSeriesFuzzyDatabaseStats.save"]], "save() (pami.extras.dbstats.temporaldatabase.temporaldatabase method)": [[10, "PAMI.extras.dbStats.TemporalDatabase.TemporalDatabase.save"]], "save() (pami.extras.dbstats.transactionaldatabase.transactionaldatabase method)": [[10, "PAMI.extras.dbStats.TransactionalDatabase.TransactionalDatabase.save"]], "save() (pami.extras.dbstats.uncertaintemporaldatabase.uncertaintemporaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTemporalDatabase.UncertainTemporalDatabase.save"]], "save() (pami.extras.dbstats.uncertaintransactionaldatabase.uncertaintransactionaldatabase method)": [[10, "PAMI.extras.dbStats.UncertainTransactionalDatabase.UncertainTransactionalDatabase.save"]], "save() (pami.extras.dbstats.utilitydatabase.utilitydatabase method)": [[10, "PAMI.extras.dbStats.UtilityDatabase.UtilityDatabase.save"]], "pami.extras.fuzzytransformation": [[11, "module-PAMI.extras.fuzzyTransformation"]], "pami.extras.fuzzytransformation.abstract": [[11, "module-PAMI.extras.fuzzyTransformation.abstract"]], "pami.extras.fuzzytransformation.temporaltofuzzy": [[11, "module-PAMI.extras.fuzzyTransformation.temporalToFuzzy"]], "pami.extras.fuzzytransformation.transactionaltofuzzy": [[11, "module-PAMI.extras.fuzzyTransformation.transactionalToFuzzy"]], "startconvert() (pami.extras.fuzzytransformation.temporaltofuzzy.temporaltofuzzy method)": [[11, "PAMI.extras.fuzzyTransformation.temporalToFuzzy.temporalToFuzzy.startConvert"]], "startconvert() (pami.extras.fuzzytransformation.transactionaltofuzzy.transactionaltofuzzy method)": [[11, "PAMI.extras.fuzzyTransformation.transactionalToFuzzy.transactionalToFuzzy.startConvert"]], "temporaltofuzzy (class in pami.extras.fuzzytransformation.temporaltofuzzy)": [[11, "PAMI.extras.fuzzyTransformation.temporalToFuzzy.temporalToFuzzy"]], "transactionaltofuzzy (class in pami.extras.fuzzytransformation.transactionaltofuzzy)": [[11, "PAMI.extras.fuzzyTransformation.transactionalToFuzzy.transactionalToFuzzy"]], "pami.extras.generatedatabase": [[12, "module-PAMI.extras.generateDatabase"]], "pami.extras.generatedatabase.generatespatiotemporaldatabase": [[12, "module-PAMI.extras.generateDatabase.generateSpatioTemporalDatabase"]], "pami.extras.generatedatabase.generatetemporaldatabase": [[12, "module-PAMI.extras.generateDatabase.generateTemporalDatabase"]], "pami.extras.generatedatabase.generatetransactionaldatabase": [[12, "module-PAMI.extras.generateDatabase.generateTransactionalDatabase"]], "alreadyadded (pami.extras.generatedatabase.generatespatiotemporaldatabase.spatiotemporaldatabasegenerator attribute)": [[12, "PAMI.extras.generateDatabase.generateSpatioTemporalDatabase.spatioTemporalDatabaseGenerator.alreadyAdded"]], "coinflip (pami.extras.generatedatabase.generatespatiotemporaldatabase.spatiotemporaldatabasegenerator attribute)": [[12, 
"PAMI.extras.generateDatabase.generateSpatioTemporalDatabase.spatioTemporalDatabaseGenerator.coinFlip"]], "create() (pami.extras.generatedatabase.generatetransactionaldatabase.generatetransactionaldatabase method)": [[12, "PAMI.extras.generateDatabase.generateTransactionalDatabase.generateTransactionalDatabase.create"]], "createpoint() (pami.extras.generatedatabase.generatespatiotemporaldatabase.spatiotemporaldatabasegenerator method)": [[12, "PAMI.extras.generateDatabase.generateSpatioTemporalDatabase.spatioTemporalDatabaseGenerator.createPoint"]], "createtemporalfile() (pami.extras.generatedatabase.generatetemporaldatabase.generatetemporaldatabase method)": [[12, "PAMI.extras.generateDatabase.generateTemporalDatabase.generateTemporalDatabase.createTemporalFile"]], "generatearray() (pami.extras.generatedatabase.generatetransactionaldatabase.generatetransactionaldatabase method)": [[12, "PAMI.extras.generateDatabase.generateTransactionalDatabase.generateTransactionalDatabase.generateArray"]], "generatetemporaldatabase (class in pami.extras.generatedatabase.generatetemporaldatabase)": [[12, "PAMI.extras.generateDatabase.generateTemporalDatabase.generateTemporalDatabase"]], "generatetransactionaldatabase (class in pami.extras.generatedatabase.generatetransactionaldatabase)": [[12, "PAMI.extras.generateDatabase.generateTransactionalDatabase.generateTransactionalDatabase"]], "getdatabaseasdataframe() (pami.extras.generatedatabase.generatetemporaldatabase.generatetemporaldatabase method)": [[12, "PAMI.extras.generateDatabase.generateTemporalDatabase.generateTemporalDatabase.getDatabaseAsDataFrame"]], "getfilename() (pami.extras.generatedatabase.generatetemporaldatabase.generatetemporaldatabase method)": [[12, "PAMI.extras.generateDatabase.generateTemporalDatabase.generateTemporalDatabase.getFileName"]], "gettransactions() (pami.extras.generatedatabase.generatetransactionaldatabase.generatetransactionaldatabase method)": [[12, "PAMI.extras.generateDatabase.generateTransactionalDatabase.generateTransactionalDatabase.getTransactions"]], "items (pami.extras.generatedatabase.generatespatiotemporaldatabase.spatiotemporaldatabasegenerator attribute)": [[12, "PAMI.extras.generateDatabase.generateSpatioTemporalDatabase.spatioTemporalDatabaseGenerator.items"]], "outfilename (pami.extras.generatedatabase.generatespatiotemporaldatabase.spatiotemporaldatabasegenerator attribute)": [[12, "PAMI.extras.generateDatabase.generateSpatioTemporalDatabase.spatioTemporalDatabaseGenerator.outFileName"]], "performcoinflip() (pami.extras.generatedatabase.generatetemporaldatabase.generatetemporaldatabase method)": [[12, "PAMI.extras.generateDatabase.generateTemporalDatabase.generateTemporalDatabase.performCoinFlip"]], "save() (pami.extras.generatedatabase.generatetransactionaldatabase.generatetransactionaldatabase method)": [[12, "PAMI.extras.generateDatabase.generateTransactionalDatabase.generateTransactionalDatabase.save"]], "saveasfile() (pami.extras.generatedatabase.generatespatiotemporaldatabase.spatiotemporaldatabasegenerator method)": [[12, "PAMI.extras.generateDatabase.generateSpatioTemporalDatabase.spatioTemporalDatabaseGenerator.saveAsFile"]], "spatiotemporaldatabasegenerator (class in pami.extras.generatedatabase.generatespatiotemporaldatabase)": [[12, "PAMI.extras.generateDatabase.generateSpatioTemporalDatabase.spatioTemporalDatabaseGenerator"]], "timestamp (pami.extras.generatedatabase.generatespatiotemporaldatabase.spatiotemporaldatabasegenerator attribute)": [[12, 
"PAMI.extras.generateDatabase.generateSpatioTemporalDatabase.spatioTemporalDatabaseGenerator.timestamp"]], "tuning() (pami.extras.generatedatabase.generatetemporaldatabase.generatetemporaldatabase method)": [[12, "PAMI.extras.generateDatabase.generateTemporalDatabase.generateTemporalDatabase.tuning"]], "tuning() (pami.extras.generatedatabase.generatetransactionaldatabase.generatetransactionaldatabase method)": [[12, "PAMI.extras.generateDatabase.generateTransactionalDatabase.generateTransactionalDatabase.tuning"]], "df2fig (class in pami.extras.graph.df2fig)": [[13, "PAMI.extras.graph.DF2Fig.DF2Fig"]], "pami.extras.graph": [[13, "module-PAMI.extras.graph"]], "pami.extras.graph.df2fig": [[13, "module-PAMI.extras.graph.DF2Fig"]], "pami.extras.graph.plotlinegraphfromdictionary": [[13, "module-PAMI.extras.graph.plotLineGraphFromDictionary"]], "pami.extras.graph.plotlinegraphsfromdataframe": [[13, "module-PAMI.extras.graph.plotLineGraphsFromDataFrame"]], "pami.extras.graph.visualizefuzzypatterns": [[13, "module-PAMI.extras.graph.visualizeFuzzyPatterns"]], "pami.extras.graph.visualizepatterns": [[13, "module-PAMI.extras.graph.visualizePatterns"]], "plot() (pami.extras.graph.df2fig.df2fig method)": [[13, "PAMI.extras.graph.DF2Fig.DF2Fig.plot"]], "plotgraphsfromdataframe (class in pami.extras.graph.plotlinegraphsfromdataframe)": [[13, "PAMI.extras.graph.plotLineGraphsFromDataFrame.plotGraphsFromDataFrame"]], "plotgraphsfromdataframe() (pami.extras.graph.plotlinegraphsfromdataframe.plotgraphsfromdataframe method)": [[13, "PAMI.extras.graph.plotLineGraphsFromDataFrame.plotGraphsFromDataFrame.plotGraphsFromDataFrame"]], "plotlinegraphfromdictionary (class in pami.extras.graph.plotlinegraphfromdictionary)": [[13, "PAMI.extras.graph.plotLineGraphFromDictionary.plotLineGraphFromDictionary"]], "visualize() (pami.extras.graph.visualizefuzzypatterns.visualizefuzzypatterns method)": [[13, "PAMI.extras.graph.visualizeFuzzyPatterns.visualizeFuzzyPatterns.visualize"]], "visualize() (pami.extras.graph.visualizepatterns.visualizepatterns method)": [[13, "PAMI.extras.graph.visualizePatterns.visualizePatterns.visualize"]], "visualizefuzzypatterns (class in pami.extras.graph.visualizefuzzypatterns)": [[13, "PAMI.extras.graph.visualizeFuzzyPatterns.visualizeFuzzyPatterns"]], "visualizepatterns (class in pami.extras.graph.visualizepatterns)": [[13, "PAMI.extras.graph.visualizePatterns.visualizePatterns"]], "pami.extras.image2database": [[14, "module-PAMI.extras.image2Database"]], "pami.extras.imageprocessing": [[15, "module-PAMI.extras.imageProcessing"]], "pami.extras.imageprocessing.imagery2databases": [[15, "module-PAMI.extras.imageProcessing.imagery2Databases"]], "createdatabase (class in pami.extras.imageprocessing.imagery2databases)": [[15, "PAMI.extras.imageProcessing.imagery2Databases.createDatabase"]], "getdataframe() (pami.extras.imageprocessing.imagery2databases.createdatabase method)": [[15, "PAMI.extras.imageProcessing.imagery2Databases.createDatabase.getDataFrame"]], "saveastemporaldb() (pami.extras.imageprocessing.imagery2databases.createdatabase method)": [[15, "PAMI.extras.imageProcessing.imagery2Databases.createDatabase.saveAsTemporalDB"]], "saveastransactionaldb() (pami.extras.imageprocessing.imagery2databases.createdatabase method)": [[15, "PAMI.extras.imageProcessing.imagery2Databases.createDatabase.saveAsTransactionalDB"]], "saveasuncertaintemporaldb() (pami.extras.imageprocessing.imagery2databases.createdatabase method)": [[15, 
"PAMI.extras.imageProcessing.imagery2Databases.createDatabase.saveAsUncertainTemporalDB"]], "saveasuncertaintransactionaldb() (pami.extras.imageprocessing.imagery2databases.createdatabase method)": [[15, "PAMI.extras.imageProcessing.imagery2Databases.createDatabase.saveAsUncertainTransactionalDB"]], "saveasutilitytemporaldb() (pami.extras.imageprocessing.imagery2databases.createdatabase method)": [[15, "PAMI.extras.imageProcessing.imagery2Databases.createDatabase.saveAsUtilityTemporalDB"]], "saveasutilitytransactionaldb() (pami.extras.imageprocessing.imagery2databases.createdatabase method)": [[15, "PAMI.extras.imageProcessing.imagery2Databases.createDatabase.saveAsUtilityTransactionalDB"]], "pami.extras.messaging": [[16, "module-PAMI.extras.messaging"]], "pami.extras.messaging.discord": [[16, "module-PAMI.extras.messaging.discord"]], "pami.extras.messaging.gmail": [[16, "module-PAMI.extras.messaging.gmail"]], "discord (class in pami.extras.messaging.discord)": [[16, "PAMI.extras.messaging.discord.discord"]], "gmail (class in pami.extras.messaging.gmail)": [[16, "PAMI.extras.messaging.gmail.gmail"]], "send() (pami.extras.messaging.discord.discord method)": [[16, "PAMI.extras.messaging.discord.discord.send"]], "send() (pami.extras.messaging.gmail.gmail method)": [[16, "PAMI.extras.messaging.gmail.gmail.send"]], "pami.extras.neighbours": [[17, "module-PAMI.extras.neighbours"]], "pami.extras.neighbours.findneighborsusingeuclideandistanceforpointinfo": [[17, "module-PAMI.extras.neighbours.findNeighborsUsingEuclideanDistanceforPointInfo"]], "pami.extras.neighbours.findneighboursusingeuclidean": [[17, "module-PAMI.extras.neighbours.findNeighboursUsingEuclidean"]], "pami.extras.neighbours.findneighboursusinggeodesic": [[17, "module-PAMI.extras.neighbours.findNeighboursUsingGeodesic"]], "createneighborhoodfileusingeuclideandistance (class in pami.extras.neighbours.findneighborsusingeuclideandistanceforpointinfo)": [[17, "PAMI.extras.neighbours.findNeighborsUsingEuclideanDistanceforPointInfo.createNeighborhoodFileUsingEuclideanDistance"]], "createneighborhoodfileusingeuclideandistance (class in pami.extras.neighbours.findneighboursusingeuclidean)": [[17, "PAMI.extras.neighbours.findNeighboursUsingEuclidean.createNeighborhoodFileUsingEuclideanDistance"]], "createneighborhoodfileusinggeodesicdistance (class in pami.extras.neighbours.findneighboursusinggeodesic)": [[17, "PAMI.extras.neighbours.findNeighboursUsingGeodesic.createNeighborhoodFileUsingGeodesicDistance"]], "getfilename() (pami.extras.neighbours.findneighborsusingeuclideandistanceforpointinfo.createneighborhoodfileusingeuclideandistance method)": [[17, "PAMI.extras.neighbours.findNeighborsUsingEuclideanDistanceforPointInfo.createNeighborhoodFileUsingEuclideanDistance.getFileName"]], "getfilename() (pami.extras.neighbours.findneighboursusingeuclidean.createneighborhoodfileusingeuclideandistance method)": [[17, "PAMI.extras.neighbours.findNeighboursUsingEuclidean.createNeighborhoodFileUsingEuclideanDistance.getFileName"]], "getfilename() (pami.extras.neighbours.findneighboursusinggeodesic.createneighborhoodfileusinggeodesicdistance method)": [[17, "PAMI.extras.neighbours.findNeighboursUsingGeodesic.createNeighborhoodFileUsingGeodesicDistance.getFileName"]], "pami.extras.sampledatasets": [[18, "module-PAMI.extras.sampleDatasets"]], "pami.extras.stats": [[19, "module-PAMI.extras.stats"]], "pami.extras.stats.transactionaldatabase": [[19, "module-PAMI.extras.stats.TransactionalDatabase"]], "pami.extras.stats.graphdatabase": [[19, 
"module-PAMI.extras.stats.graphDatabase"]], "pami.extras.stats.sequentialdatabase": [[19, "module-PAMI.extras.stats.sequentialDatabase"]], "pami.extras.stats.temporaldatabase": [[19, "module-PAMI.extras.stats.temporalDatabase"]], "pami.extras.stats.utilitydatabase": [[19, "module-PAMI.extras.stats.utilityDatabase"]], "transactionaldatabase (class in pami.extras.stats.transactionaldatabase)": [[19, "PAMI.extras.stats.TransactionalDatabase.TransactionalDatabase"]], "convertdataintomatrix() (pami.extras.stats.transactionaldatabase.transactionaldatabase method)": [[19, "PAMI.extras.stats.TransactionalDatabase.TransactionalDatabase.convertDataIntoMatrix"]], "convertdataintomatrix() (pami.extras.stats.temporaldatabase.temporaldatabase method)": [[19, "PAMI.extras.stats.temporalDatabase.temporalDatabase.convertDataIntoMatrix"]], "creatingitemsets() (pami.extras.stats.utilitydatabase.utilitydatabase method)": [[19, "PAMI.extras.stats.utilityDatabase.utilityDatabase.creatingItemSets"]], "getaverageinterarrivalperiod() (pami.extras.stats.temporaldatabase.temporaldatabase method)": [[19, "PAMI.extras.stats.temporalDatabase.temporalDatabase.getAverageInterArrivalPeriod"]], "getaverageitempersequencelength() (pami.extras.stats.sequentialdatabase.sequentialdatabase method)": [[19, "PAMI.extras.stats.sequentialDatabase.sequentialDatabase.getAverageItemPerSequenceLength"]], "getaverageitempersubsequencelength() (pami.extras.stats.sequentialdatabase.sequentialdatabase method)": [[19, "PAMI.extras.stats.sequentialDatabase.sequentialDatabase.getAverageItemPerSubsequenceLength"]], "getaverageperiodofitem() (pami.extras.stats.temporaldatabase.temporaldatabase method)": [[19, "PAMI.extras.stats.temporalDatabase.temporalDatabase.getAveragePeriodOfItem"]], "getaveragesubsequencepersequencelength() (pami.extras.stats.sequentialdatabase.sequentialdatabase method)": [[19, "PAMI.extras.stats.sequentialDatabase.sequentialDatabase.getAverageSubsequencePerSequenceLength"]], "getaveragetransactionlength() (pami.extras.stats.transactionaldatabase.transactionaldatabase method)": [[19, "PAMI.extras.stats.TransactionalDatabase.TransactionalDatabase.getAverageTransactionLength"]], "getaveragetransactionlength() (pami.extras.stats.temporaldatabase.temporaldatabase method)": [[19, "PAMI.extras.stats.temporalDatabase.temporalDatabase.getAverageTransactionLength"]], "getaveragetransactionlength() (pami.extras.stats.utilitydatabase.utilitydatabase method)": [[19, "PAMI.extras.stats.utilityDatabase.utilityDatabase.getAverageTransactionLength"]], "getaverageutility() (pami.extras.stats.utilitydatabase.utilitydatabase method)": [[19, "PAMI.extras.stats.utilityDatabase.utilityDatabase.getAverageUtility"]], "getdatabasesize() (pami.extras.stats.transactionaldatabase.transactionaldatabase method)": [[19, "PAMI.extras.stats.TransactionalDatabase.TransactionalDatabase.getDatabaseSize"]], "getdatabasesize() (pami.extras.stats.sequentialdatabase.sequentialdatabase method)": [[19, "PAMI.extras.stats.sequentialDatabase.sequentialDatabase.getDatabaseSize"]], "getdatabasesize() (pami.extras.stats.temporaldatabase.temporaldatabase method)": [[19, "PAMI.extras.stats.temporalDatabase.temporalDatabase.getDatabaseSize"]], "getdatabasesize() (pami.extras.stats.utilitydatabase.utilitydatabase method)": [[19, "PAMI.extras.stats.utilityDatabase.utilityDatabase.getDatabaseSize"]], "getdensity() (pami.extras.stats.transactionaldatabase.transactionaldatabase method)": [[19, "PAMI.extras.stats.TransactionalDatabase.TransactionalDatabase.getDensity"]], 
"getdensity() (pami.extras.stats.temporaldatabase.temporaldatabase method)": [[19, "PAMI.extras.stats.temporalDatabase.temporalDatabase.getDensity"]], "getfrequenciesinrange() (pami.extras.stats.transactionaldatabase.transactionaldatabase method)": [[19, "PAMI.extras.stats.TransactionalDatabase.TransactionalDatabase.getFrequenciesInRange"]], "getfrequenciesinrange() (pami.extras.stats.sequentialdatabase.sequentialdatabase method)": [[19, "PAMI.extras.stats.sequentialDatabase.sequentialDatabase.getFrequenciesInRange"]], "getfrequenciesinrange() (pami.extras.stats.temporaldatabase.temporaldatabase method)": [[19, "PAMI.extras.stats.temporalDatabase.temporalDatabase.getFrequenciesInRange"]], "getfrequenciesinrange() (pami.extras.stats.utilitydatabase.utilitydatabase method)": [[19, "PAMI.extras.stats.utilityDatabase.utilityDatabase.getFrequenciesInRange"]], "getmaximuminterarrivalperiod() (pami.extras.stats.temporaldatabase.temporaldatabase method)": [[19, "PAMI.extras.stats.temporalDatabase.temporalDatabase.getMaximumInterArrivalPeriod"]], "getmaximumperiodofitem() (pami.extras.stats.temporaldatabase.temporaldatabase method)": [[19, "PAMI.extras.stats.temporalDatabase.temporalDatabase.getMaximumPeriodOfItem"]], "getmaximumsequencelength() (pami.extras.stats.sequentialdatabase.sequentialdatabase method)": [[19, "PAMI.extras.stats.sequentialDatabase.sequentialDatabase.getMaximumSequenceLength"]], "getmaximumsubsequencelength() (pami.extras.stats.sequentialdatabase.sequentialdatabase method)": [[19, "PAMI.extras.stats.sequentialDatabase.sequentialDatabase.getMaximumSubsequenceLength"]], "getmaximumtransactionlength() (pami.extras.stats.transactionaldatabase.transactionaldatabase method)": [[19, "PAMI.extras.stats.TransactionalDatabase.TransactionalDatabase.getMaximumTransactionLength"]], "getmaximumtransactionlength() (pami.extras.stats.temporaldatabase.temporaldatabase method)": [[19, "PAMI.extras.stats.temporalDatabase.temporalDatabase.getMaximumTransactionLength"]], "getmaximumtransactionlength() (pami.extras.stats.utilitydatabase.utilitydatabase method)": [[19, "PAMI.extras.stats.utilityDatabase.utilityDatabase.getMaximumTransactionLength"]], "getmaximumutility() (pami.extras.stats.utilitydatabase.utilitydatabase method)": [[19, "PAMI.extras.stats.utilityDatabase.utilityDatabase.getMaximumUtility"]], "getminimuminterarrivalperiod() (pami.extras.stats.temporaldatabase.temporaldatabase method)": [[19, "PAMI.extras.stats.temporalDatabase.temporalDatabase.getMinimumInterArrivalPeriod"]], "getminimumperiodofitem() (pami.extras.stats.temporaldatabase.temporaldatabase method)": [[19, "PAMI.extras.stats.temporalDatabase.temporalDatabase.getMinimumPeriodOfItem"]], "getminimumsequencelength() (pami.extras.stats.sequentialdatabase.sequentialdatabase method)": [[19, "PAMI.extras.stats.sequentialDatabase.sequentialDatabase.getMinimumSequenceLength"]], "getminimumsubsequencelength() (pami.extras.stats.sequentialdatabase.sequentialdatabase method)": [[19, "PAMI.extras.stats.sequentialDatabase.sequentialDatabase.getMinimumSubsequenceLength"]], "getminimumtransactionlength() (pami.extras.stats.transactionaldatabase.transactionaldatabase method)": [[19, "PAMI.extras.stats.TransactionalDatabase.TransactionalDatabase.getMinimumTransactionLength"]], "getminimumtransactionlength() (pami.extras.stats.temporaldatabase.temporaldatabase method)": [[19, "PAMI.extras.stats.temporalDatabase.temporalDatabase.getMinimumTransactionLength"]], "getminimumtransactionlength() (pami.extras.stats.utilitydatabase.utilitydatabase 
method)": [[19, "PAMI.extras.stats.utilityDatabase.utilityDatabase.getMinimumTransactionLength"]], "getminimumutility() (pami.extras.stats.utilitydatabase.utilitydatabase method)": [[19, "PAMI.extras.stats.utilityDatabase.utilityDatabase.getMinimumUtility"]], "getnumberofitems() (pami.extras.stats.transactionaldatabase.transactionaldatabase method)": [[19, "PAMI.extras.stats.TransactionalDatabase.TransactionalDatabase.getNumberOfItems"]], "getnumberofitems() (pami.extras.stats.utilitydatabase.utilitydatabase method)": [[19, "PAMI.extras.stats.utilityDatabase.utilityDatabase.getNumberOfItems"]], "getnumberoftransactionspertimestamp() (pami.extras.stats.temporaldatabase.temporaldatabase method)": [[19, "PAMI.extras.stats.temporalDatabase.temporalDatabase.getNumberOfTransactionsPerTimestamp"]], "getperiodsinrange() (pami.extras.stats.temporaldatabase.temporaldatabase method)": [[19, "PAMI.extras.stats.temporalDatabase.temporalDatabase.getPeriodsInRange"]], "getsequencesize() (pami.extras.stats.sequentialdatabase.sequentialdatabase method)": [[19, "PAMI.extras.stats.sequentialDatabase.sequentialDatabase.getSequenceSize"]], "getsequenciallengthdistribution() (pami.extras.stats.sequentialdatabase.sequentialdatabase method)": [[19, "PAMI.extras.stats.sequentialDatabase.sequentialDatabase.getSequencialLengthDistribution"]], "getsortedlistofitemfrequencies() (pami.extras.stats.transactionaldatabase.transactionaldatabase method)": [[19, "PAMI.extras.stats.TransactionalDatabase.TransactionalDatabase.getSortedListOfItemFrequencies"]], "getsortedlistofitemfrequencies() (pami.extras.stats.sequentialdatabase.sequentialdatabase method)": [[19, "PAMI.extras.stats.sequentialDatabase.sequentialDatabase.getSortedListOfItemFrequencies"]], "getsortedlistofitemfrequencies() (pami.extras.stats.temporaldatabase.temporaldatabase method)": [[19, "PAMI.extras.stats.temporalDatabase.temporalDatabase.getSortedListOfItemFrequencies"]], "getsortedlistofitemfrequencies() (pami.extras.stats.utilitydatabase.utilitydatabase method)": [[19, "PAMI.extras.stats.utilityDatabase.utilityDatabase.getSortedListOfItemFrequencies"]], "getsortedutilityvaluesofitem() (pami.extras.stats.utilitydatabase.utilitydatabase method)": [[19, "PAMI.extras.stats.utilityDatabase.utilityDatabase.getSortedUtilityValuesOfItem"]], "getsparsity() (pami.extras.stats.transactionaldatabase.transactionaldatabase method)": [[19, "PAMI.extras.stats.TransactionalDatabase.TransactionalDatabase.getSparsity"]], "getsparsity() (pami.extras.stats.temporaldatabase.temporaldatabase method)": [[19, "PAMI.extras.stats.temporalDatabase.temporalDatabase.getSparsity"]], "getsparsity() (pami.extras.stats.utilitydatabase.utilitydatabase method)": [[19, "PAMI.extras.stats.utilityDatabase.utilityDatabase.getSparsity"]], "getstandarddeviationperiod() (pami.extras.stats.temporaldatabase.temporaldatabase method)": [[19, "PAMI.extras.stats.temporalDatabase.temporalDatabase.getStandardDeviationPeriod"]], "getstandarddeviationsequencelength() (pami.extras.stats.sequentialdatabase.sequentialdatabase method)": [[19, "PAMI.extras.stats.sequentialDatabase.sequentialDatabase.getStandardDeviationSequenceLength"]], "getstandarddeviationsubsequencelength() (pami.extras.stats.sequentialdatabase.sequentialdatabase method)": [[19, "PAMI.extras.stats.sequentialDatabase.sequentialDatabase.getStandardDeviationSubsequenceLength"]], "getstandarddeviationtransactionlength() (pami.extras.stats.transactionaldatabase.transactionaldatabase method)": [[19, 
"PAMI.extras.stats.TransactionalDatabase.TransactionalDatabase.getStandardDeviationTransactionLength"]], "getstandarddeviationtransactionlength() (pami.extras.stats.temporaldatabase.temporaldatabase method)": [[19, "PAMI.extras.stats.temporalDatabase.temporalDatabase.getStandardDeviationTransactionLength"]], "getstandarddeviationtransactionlength() (pami.extras.stats.utilitydatabase.utilitydatabase method)": [[19, "PAMI.extras.stats.utilityDatabase.utilityDatabase.getStandardDeviationTransactionLength"]], "getsubsequenciallengthdistribution() (pami.extras.stats.sequentialdatabase.sequentialdatabase method)": [[19, "PAMI.extras.stats.sequentialDatabase.sequentialDatabase.getSubsequencialLengthDistribution"]], "gettotalnumberofitems() (pami.extras.stats.transactionaldatabase.transactionaldatabase method)": [[19, "PAMI.extras.stats.TransactionalDatabase.TransactionalDatabase.getTotalNumberOfItems"]], "gettotalnumberofitems() (pami.extras.stats.sequentialdatabase.sequentialdatabase method)": [[19, "PAMI.extras.stats.sequentialDatabase.sequentialDatabase.getTotalNumberOfItems"]], "gettotalnumberofitems() (pami.extras.stats.temporaldatabase.temporaldatabase method)": [[19, "PAMI.extras.stats.temporalDatabase.temporalDatabase.getTotalNumberOfItems"]], "gettotalnumberofitems() (pami.extras.stats.utilitydatabase.utilitydatabase method)": [[19, "PAMI.extras.stats.utilityDatabase.utilityDatabase.getTotalNumberOfItems"]], "gettotalutility() (pami.extras.stats.utilitydatabase.utilitydatabase method)": [[19, "PAMI.extras.stats.utilityDatabase.utilityDatabase.getTotalUtility"]], "gettransanctionallengthdistribution() (pami.extras.stats.transactionaldatabase.transactionaldatabase method)": [[19, "PAMI.extras.stats.TransactionalDatabase.TransactionalDatabase.getTransanctionalLengthDistribution"]], "gettransanctionallengthdistribution() (pami.extras.stats.temporaldatabase.temporaldatabase method)": [[19, "PAMI.extras.stats.temporalDatabase.temporalDatabase.getTransanctionalLengthDistribution"]], "gettransanctionallengthdistribution() (pami.extras.stats.utilitydatabase.utilitydatabase method)": [[19, "PAMI.extras.stats.utilityDatabase.utilityDatabase.getTransanctionalLengthDistribution"]], "getvariancesequencelength() (pami.extras.stats.sequentialdatabase.sequentialdatabase method)": [[19, "PAMI.extras.stats.sequentialDatabase.sequentialDatabase.getVarianceSequenceLength"]], "getvariancesubsequencelength() (pami.extras.stats.sequentialdatabase.sequentialdatabase method)": [[19, "PAMI.extras.stats.sequentialDatabase.sequentialDatabase.getVarianceSubsequenceLength"]], "getvariancetransactionlength() (pami.extras.stats.transactionaldatabase.transactionaldatabase method)": [[19, "PAMI.extras.stats.TransactionalDatabase.TransactionalDatabase.getVarianceTransactionLength"]], "getvariancetransactionlength() (pami.extras.stats.temporaldatabase.temporaldatabase method)": [[19, "PAMI.extras.stats.temporalDatabase.temporalDatabase.getVarianceTransactionLength"]], "getvariancetransactionlength() (pami.extras.stats.utilitydatabase.utilitydatabase method)": [[19, "PAMI.extras.stats.utilityDatabase.utilityDatabase.getVarianceTransactionLength"]], "graphdatabase (class in pami.extras.stats.graphdatabase)": [[19, "PAMI.extras.stats.graphDatabase.graphDatabase"]], "plotedgedistribution() (pami.extras.stats.graphdatabase.graphdatabase method)": [[19, "PAMI.extras.stats.graphDatabase.graphDatabase.plotEdgeDistribution"]], "plotgraphs() (pami.extras.stats.transactionaldatabase.transactionaldatabase method)": [[19, 
"PAMI.extras.stats.TransactionalDatabase.TransactionalDatabase.plotGraphs"]], "plotgraphs() (pami.extras.stats.sequentialdatabase.sequentialdatabase method)": [[19, "PAMI.extras.stats.sequentialDatabase.sequentialDatabase.plotGraphs"]], "plotgraphs() (pami.extras.stats.temporaldatabase.temporaldatabase method)": [[19, "PAMI.extras.stats.temporalDatabase.temporalDatabase.plotGraphs"]], "plotgraphs() (pami.extras.stats.utilitydatabase.utilitydatabase method)": [[19, "PAMI.extras.stats.utilityDatabase.utilityDatabase.plotGraphs"]], "plotnodedistribution() (pami.extras.stats.graphdatabase.graphdatabase method)": [[19, "PAMI.extras.stats.graphDatabase.graphDatabase.plotNodeDistribution"]], "printgraphdatabasestatistics() (pami.extras.stats.graphdatabase.graphdatabase method)": [[19, "PAMI.extras.stats.graphDatabase.graphDatabase.printGraphDatabaseStatistics"]], "printindividualgraphstats() (pami.extras.stats.graphdatabase.graphdatabase method)": [[19, "PAMI.extras.stats.graphDatabase.graphDatabase.printIndividualGraphStats"]], "printstats() (pami.extras.stats.transactionaldatabase.transactionaldatabase method)": [[19, "PAMI.extras.stats.TransactionalDatabase.TransactionalDatabase.printStats"]], "printstats() (pami.extras.stats.sequentialdatabase.sequentialdatabase method)": [[19, "PAMI.extras.stats.sequentialDatabase.sequentialDatabase.printStats"]], "printstats() (pami.extras.stats.temporaldatabase.temporaldatabase method)": [[19, "PAMI.extras.stats.temporalDatabase.temporalDatabase.printStats"]], "printstats() (pami.extras.stats.utilitydatabase.utilitydatabase method)": [[19, "PAMI.extras.stats.utilityDatabase.utilityDatabase.printStats"]], "readdatabase() (pami.extras.stats.sequentialdatabase.sequentialdatabase method)": [[19, "PAMI.extras.stats.sequentialDatabase.sequentialDatabase.readDatabase"]], "readdatabase() (pami.extras.stats.temporaldatabase.temporaldatabase method)": [[19, "PAMI.extras.stats.temporalDatabase.temporalDatabase.readDatabase"]], "readdatabase() (pami.extras.stats.utilitydatabase.utilitydatabase method)": [[19, "PAMI.extras.stats.utilityDatabase.utilityDatabase.readDatabase"]], "run() (pami.extras.stats.transactionaldatabase.transactionaldatabase method)": [[19, "PAMI.extras.stats.TransactionalDatabase.TransactionalDatabase.run"]], "run() (pami.extras.stats.sequentialdatabase.sequentialdatabase method)": [[19, "PAMI.extras.stats.sequentialDatabase.sequentialDatabase.run"]], "run() (pami.extras.stats.temporaldatabase.temporaldatabase method)": [[19, "PAMI.extras.stats.temporalDatabase.temporalDatabase.run"]], "run() (pami.extras.stats.utilitydatabase.utilitydatabase method)": [[19, "PAMI.extras.stats.utilityDatabase.utilityDatabase.run"]], "save() (pami.extras.stats.transactionaldatabase.transactionaldatabase method)": [[19, "PAMI.extras.stats.TransactionalDatabase.TransactionalDatabase.save"]], "save() (pami.extras.stats.temporaldatabase.temporaldatabase method)": [[19, "PAMI.extras.stats.temporalDatabase.temporalDatabase.save"]], "save() (pami.extras.stats.utilitydatabase.utilitydatabase method)": [[19, "PAMI.extras.stats.utilityDatabase.utilityDatabase.save"]], "sequentialdatabase (class in pami.extras.stats.sequentialdatabase)": [[19, "PAMI.extras.stats.sequentialDatabase.sequentialDatabase"]], "temporaldatabase (class in pami.extras.stats.temporaldatabase)": [[19, "PAMI.extras.stats.temporalDatabase.temporalDatabase"]], "utilitydatabase (class in pami.extras.stats.utilitydatabase)": [[19, "PAMI.extras.stats.utilityDatabase.utilityDatabase"]], 
"createsynthetictemporal (class in pami.extras.syntheticdatagenerator.temporaldatabasegen)": [[20, "PAMI.extras.syntheticDataGenerator.temporalDatabaseGen.CreateSyntheticTemporal"]], "generate() (pami.extras.syntheticdatagenerator.utilitydatabase.utilitydatagenerator method)": [[20, "PAMI.extras.syntheticDataGenerator.utilityDatabase.UtilityDataGenerator.Generate"]], "generateandprintitempairs() (pami.extras.syntheticdatagenerator.utilitydatabase.utilitydatagenerator method)": [[20, "PAMI.extras.syntheticDataGenerator.utilityDatabase.UtilityDataGenerator.GenerateAndPrintItemPairs"]], "generateexternalutilitydata() (pami.extras.syntheticdatagenerator.utilitydatabase.utilitydatagenerator method)": [[20, "PAMI.extras.syntheticDataGenerator.utilityDatabase.UtilityDataGenerator.GenerateExternalUtilityData"]], "getexternalutilitydata() (pami.extras.syntheticdatagenerator.utilitydatabase.utilitydatagenerator method)": [[20, "PAMI.extras.syntheticDataGenerator.utilityDatabase.UtilityDataGenerator.GetExternalUtilityData"]], "getinternalutilitydata() (pami.extras.syntheticdatagenerator.utilitydatabase.utilitydatagenerator method)": [[20, "PAMI.extras.syntheticDataGenerator.utilityDatabase.UtilityDataGenerator.GetInternalUtilityData"]], "getutilitydata() (pami.extras.syntheticdatagenerator.utilitydatabase.utilitydatagenerator method)": [[20, "PAMI.extras.syntheticDataGenerator.utilityDatabase.UtilityDataGenerator.GetUtilityData"]], "pami.extras.syntheticdatagenerator": [[20, "module-PAMI.extras.syntheticDataGenerator"]], "pami.extras.syntheticdatagenerator.temporaldatabase": [[20, "module-PAMI.extras.syntheticDataGenerator.TemporalDatabase"]], "pami.extras.syntheticdatagenerator.transactionaldatabase": [[20, "module-PAMI.extras.syntheticDataGenerator.TransactionalDatabase"]], "pami.extras.syntheticdatagenerator.createsyntheticgeoreferentialtemporal": [[20, "module-PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialTemporal"]], "pami.extras.syntheticdatagenerator.createsyntheticgeoreferentialtransactions": [[20, "module-PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialTransactions"]], "pami.extras.syntheticdatagenerator.createsyntheticgeoreferentialuncertaintransaction": [[20, "module-PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialUncertainTransaction"]], "pami.extras.syntheticdatagenerator.createsynthetictemporal": [[20, "module-PAMI.extras.syntheticDataGenerator.createSyntheticTemporal"]], "pami.extras.syntheticdatagenerator.createsynthetictransactions": [[20, "module-PAMI.extras.syntheticDataGenerator.createSyntheticTransactions"]], "pami.extras.syntheticdatagenerator.createsyntheticuncertaintemporal": [[20, "module-PAMI.extras.syntheticDataGenerator.createSyntheticUncertainTemporal"]], "pami.extras.syntheticdatagenerator.createsyntheticuncertaintransactions": [[20, "module-PAMI.extras.syntheticDataGenerator.createSyntheticUncertainTransactions"]], "pami.extras.syntheticdatagenerator.createsyntheticutility": [[20, "module-PAMI.extras.syntheticDataGenerator.createSyntheticUtility"]], "pami.extras.syntheticdatagenerator.fuzzydatabase": [[20, "module-PAMI.extras.syntheticDataGenerator.fuzzyDatabase"]], "pami.extras.syntheticdatagenerator.generatetemporal": [[20, "module-PAMI.extras.syntheticDataGenerator.generateTemporal"]], "pami.extras.syntheticdatagenerator.generatetransactional": [[20, "module-PAMI.extras.syntheticDataGenerator.generateTransactional"]], "pami.extras.syntheticdatagenerator.generateuncertaintemporal": [[20, 
"module-PAMI.extras.syntheticDataGenerator.generateUncertainTemporal"]], "pami.extras.syntheticdatagenerator.generateuncertaintransactional": [[20, "module-PAMI.extras.syntheticDataGenerator.generateUncertainTransactional"]], "pami.extras.syntheticdatagenerator.generateutilitytemporal": [[20, "module-PAMI.extras.syntheticDataGenerator.generateUtilityTemporal"]], "pami.extras.syntheticdatagenerator.generateutilitytransactional": [[20, "module-PAMI.extras.syntheticDataGenerator.generateUtilityTransactional"]], "pami.extras.syntheticdatagenerator.georeferencedtemporaldatabase": [[20, "module-PAMI.extras.syntheticDataGenerator.georeferencedTemporalDatabase"]], "pami.extras.syntheticdatagenerator.georeferencedtransactionaldatabase": [[20, "module-PAMI.extras.syntheticDataGenerator.georeferencedTransactionalDatabase"]], "pami.extras.syntheticdatagenerator.syntheticutilitydatabase": [[20, "module-PAMI.extras.syntheticDataGenerator.syntheticUtilityDatabase"]], "pami.extras.syntheticdatagenerator.temporaldatabasegen": [[20, "module-PAMI.extras.syntheticDataGenerator.temporalDatabaseGen"]], "pami.extras.syntheticdatagenerator.utilitydatabase": [[20, "module-PAMI.extras.syntheticDataGenerator.utilityDatabase"]], "save() (pami.extras.syntheticdatagenerator.utilitydatabase.utilitydatagenerator method)": [[20, "PAMI.extras.syntheticDataGenerator.utilityDatabase.UtilityDataGenerator.Save"]], "saveitemsinternalutilityvalues() (pami.extras.syntheticdatagenerator.utilitydatabase.utilitydatagenerator method)": [[20, "PAMI.extras.syntheticDataGenerator.utilityDatabase.UtilityDataGenerator.SaveItemsInternalUtilityValues"]], "saveitemsexternalutilityvalues() (pami.extras.syntheticdatagenerator.utilitydatabase.utilitydatagenerator method)": [[20, "PAMI.extras.syntheticDataGenerator.utilityDatabase.UtilityDataGenerator.Saveitemsexternalutilityvalues"]], "temporaldatabase (class in pami.extras.syntheticdatagenerator.temporaldatabase)": [[20, "PAMI.extras.syntheticDataGenerator.TemporalDatabase.TemporalDatabase"]], "transactionaldatabase (class in pami.extras.syntheticdatagenerator.transactionaldatabase)": [[20, "PAMI.extras.syntheticDataGenerator.TransactionalDatabase.TransactionalDatabase"]], "utilitydatagenerator (class in pami.extras.syntheticdatagenerator.utilitydatabase)": [[20, "PAMI.extras.syntheticDataGenerator.utilityDatabase.UtilityDataGenerator"]], "__init__() (pami.extras.syntheticdatagenerator.syntheticutilitydatabase.syntheticutilitydatabase method)": [[20, "PAMI.extras.syntheticDataGenerator.syntheticUtilityDatabase.syntheticUtilityDatabase.__init__"]], "avgtransactionlength (pami.extras.syntheticdatagenerator.syntheticutilitydatabase.syntheticutilitydatabase attribute)": [[20, "PAMI.extras.syntheticDataGenerator.syntheticUtilityDatabase.syntheticUtilityDatabase.avgTransactionLength"]], "avg_transaction_length (pami.extras.syntheticdatagenerator.temporaldatabasegen.createsynthetictemporal attribute)": [[20, "PAMI.extras.syntheticDataGenerator.temporalDatabaseGen.CreateSyntheticTemporal.avg_transaction_length"]], "create() (pami.extras.syntheticdatagenerator.temporaldatabase.temporaldatabase method)": [[20, "PAMI.extras.syntheticDataGenerator.TemporalDatabase.TemporalDatabase.create"]], "create() (pami.extras.syntheticdatagenerator.transactionaldatabase.transactionaldatabase method)": [[20, "PAMI.extras.syntheticDataGenerator.TransactionalDatabase.TransactionalDatabase.create"]], "creategeoreferentialtemporaldatabase (class in 
pami.extras.syntheticdatagenerator.createsyntheticgeoreferentialtemporal)": [[20, "PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialTemporal.createGeoreferentialTemporalDatabase"]], "creategeoreferentialtemporaldatabase() (pami.extras.syntheticdatagenerator.createsyntheticgeoreferentialtemporal.creategeoreferentialtemporaldatabase method)": [[20, "PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialTemporal.createGeoreferentialTemporalDatabase.createGeoreferentialTemporalDatabase"]], "creategeoreferentialtransactionaldatabase() (pami.extras.syntheticdatagenerator.createsyntheticgeoreferentialtransactions.createsyntheticgeoreferentialtransaction method)": [[20, "PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialTransactions.createSyntheticGeoreferentialTransaction.createGeoreferentialTransactionalDatabase"]], "creategeoreferentialuncertaintransactionaldatabase() (pami.extras.syntheticdatagenerator.createsyntheticgeoreferentialuncertaintransaction.createsyntheticgeoreferentialuncertaintransaction method)": [[20, "PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialUncertainTransaction.createSyntheticGeoreferentialUncertainTransaction.createGeoreferentialUncertainTransactionalDatabase"]], "createrandomnumbers() (pami.extras.syntheticdatagenerator.syntheticutilitydatabase.syntheticutilitydatabase method)": [[20, "PAMI.extras.syntheticDataGenerator.syntheticUtilityDatabase.syntheticUtilityDatabase.createRandomNumbers"], [20, "id0"]], "createsyntheticgeoreferentialtransaction (class in pami.extras.syntheticdatagenerator.createsyntheticgeoreferentialtransactions)": [[20, "PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialTransactions.createSyntheticGeoreferentialTransaction"]], "createsyntheticgeoreferentialuncertaintransaction (class in pami.extras.syntheticdatagenerator.createsyntheticgeoreferentialuncertaintransaction)": [[20, "PAMI.extras.syntheticDataGenerator.createSyntheticGeoreferentialUncertainTransaction.createSyntheticGeoreferentialUncertainTransaction"]], "createsynthetictemporal (class in pami.extras.syntheticdatagenerator.createsynthetictemporal)": [[20, "PAMI.extras.syntheticDataGenerator.createSyntheticTemporal.createSyntheticTemporal"]], "createsynthetictransaction (class in pami.extras.syntheticdatagenerator.createsynthetictransactions)": [[20, "PAMI.extras.syntheticDataGenerator.createSyntheticTransactions.createSyntheticTransaction"]], "createsyntheticuncertaintemporal (class in pami.extras.syntheticdatagenerator.createsyntheticuncertaintemporal)": [[20, "PAMI.extras.syntheticDataGenerator.createSyntheticUncertainTemporal.createSyntheticUncertainTemporal"]], "createsyntheticuncertaintransaction (class in pami.extras.syntheticdatagenerator.createsyntheticuncertaintransactions)": [[20, "PAMI.extras.syntheticDataGenerator.createSyntheticUncertainTransactions.createSyntheticUncertainTransaction"]], "createsyntheticutility (class in pami.extras.syntheticdatagenerator.createsyntheticutility)": [[20, "PAMI.extras.syntheticDataGenerator.createSyntheticUtility.createSyntheticUtility"]], "createsyntheticutilitydatabase() (pami.extras.syntheticdatagenerator.syntheticutilitydatabase.syntheticutilitydatabase method)": [[20, "PAMI.extras.syntheticDataGenerator.syntheticUtilityDatabase.syntheticUtilityDatabase.createSyntheticUtilityDatabase"], [20, "id10"]], "createtemporaldatabase() (pami.extras.syntheticdatagenerator.createsynthetictemporal.createsynthetictemporal method)": [[20, 
"PAMI.extras.syntheticDataGenerator.createSyntheticTemporal.createSyntheticTemporal.createTemporalDatabase"]], "createtransactionaldatabase() (pami.extras.syntheticdatagenerator.createsynthetictransactions.createsynthetictransaction method)": [[20, "PAMI.extras.syntheticDataGenerator.createSyntheticTransactions.createSyntheticTransaction.createTransactionalDatabase"]], "createuncertaintemporaldatabase() (pami.extras.syntheticdatagenerator.createsyntheticuncertaintemporal.createsyntheticuncertaintemporal method)": [[20, "PAMI.extras.syntheticDataGenerator.createSyntheticUncertainTemporal.createSyntheticUncertainTemporal.createUncertainTemporalDatabase"]], "createuncertaintransactionaldatabase() (pami.extras.syntheticdatagenerator.createsyntheticuncertaintransactions.createsyntheticuncertaintransaction method)": [[20, "PAMI.extras.syntheticDataGenerator.createSyntheticUncertainTransactions.createSyntheticUncertainTransaction.createUncertainTransactionalDatabase"]], "createutilitydatabase() (pami.extras.syntheticdatagenerator.createsyntheticutility.createsyntheticutility method)": [[20, "PAMI.extras.syntheticDataGenerator.createSyntheticUtility.createSyntheticUtility.createUtilityDatabase"]], "create_temporal_database() (pami.extras.syntheticdatagenerator.temporaldatabasegen.createsynthetictemporal method)": [[20, "PAMI.extras.syntheticDataGenerator.temporalDatabaseGen.CreateSyntheticTemporal.create_temporal_database"], [20, "id12"]], "generate() (pami.extras.syntheticdatagenerator.generatetemporal.generatetemporal method)": [[20, "PAMI.extras.syntheticDataGenerator.generateTemporal.generateTemporal.generate"]], "generate() (pami.extras.syntheticdatagenerator.generatetransactional.generatetransactional method)": [[20, "PAMI.extras.syntheticDataGenerator.generateTransactional.generateTransactional.generate"]], "generate() (pami.extras.syntheticdatagenerator.generateuncertaintemporal.generateuncertaintemporal method)": [[20, "PAMI.extras.syntheticDataGenerator.generateUncertainTemporal.generateUncertainTemporal.generate"]], "generate() (pami.extras.syntheticdatagenerator.generateuncertaintransactional.generateuncertaintransactional method)": [[20, "PAMI.extras.syntheticDataGenerator.generateUncertainTransactional.generateUncertainTransactional.generate"]], "generate() (pami.extras.syntheticdatagenerator.generateutilitytemporal.generateutilitytemporal method)": [[20, "PAMI.extras.syntheticDataGenerator.generateUtilityTemporal.generateUtilityTemporal.generate"]], "generate() (pami.extras.syntheticdatagenerator.generateutilitytransactional.generateutilitytransactional method)": [[20, "PAMI.extras.syntheticDataGenerator.generateUtilityTransactional.generateUtilityTransactional.generate"]], "generatearray() (pami.extras.syntheticdatagenerator.transactionaldatabase.transactionaldatabase method)": [[20, "PAMI.extras.syntheticDataGenerator.TransactionalDatabase.TransactionalDatabase.generateArray"]], "generatetemporal (class in pami.extras.syntheticdatagenerator.generatetemporal)": [[20, "PAMI.extras.syntheticDataGenerator.generateTemporal.generateTemporal"]], "generatetransactional (class in pami.extras.syntheticdatagenerator.generatetransactional)": [[20, "PAMI.extras.syntheticDataGenerator.generateTransactional.generateTransactional"]], "generateuncertaintemporal (class in pami.extras.syntheticdatagenerator.generateuncertaintemporal)": [[20, "PAMI.extras.syntheticDataGenerator.generateUncertainTemporal.generateUncertainTemporal"]], "generateuncertaintransactional (class in 
pami.extras.syntheticdatagenerator.generateuncertaintransactional)": [[20, "PAMI.extras.syntheticDataGenerator.generateUncertainTransactional.generateUncertainTransactional"]], "generateutilitytemporal (class in pami.extras.syntheticdatagenerator.generateutilitytemporal)": [[20, "PAMI.extras.syntheticDataGenerator.generateUtilityTemporal.generateUtilityTemporal"]], "generateutilitytransactional (class in pami.extras.syntheticdatagenerator.generateutilitytransactional)": [[20, "PAMI.extras.syntheticDataGenerator.generateUtilityTransactional.generateUtilityTransactional"]], "generate_random_numbers() (pami.extras.syntheticdatagenerator.temporaldatabasegen.createsynthetictemporal method)": [[20, "PAMI.extras.syntheticDataGenerator.temporalDatabaseGen.CreateSyntheticTemporal.generate_random_numbers"]], "getdatabaseasdataframe() (pami.extras.syntheticdatagenerator.temporaldatabase.temporaldatabase method)": [[20, "PAMI.extras.syntheticDataGenerator.TemporalDatabase.TemporalDatabase.getDatabaseAsDataFrame"]], "getfilename() (pami.extras.syntheticdatagenerator.temporaldatabase.temporaldatabase method)": [[20, "PAMI.extras.syntheticDataGenerator.TemporalDatabase.TemporalDatabase.getFileName"]], "gettransactions() (pami.extras.syntheticdatagenerator.transactionaldatabase.transactionaldatabase method)": [[20, "PAMI.extras.syntheticDataGenerator.TransactionalDatabase.TransactionalDatabase.getTransactions"]], "maxutilrange (pami.extras.syntheticdatagenerator.syntheticutilitydatabase.syntheticutilitydatabase attribute)": [[20, "PAMI.extras.syntheticDataGenerator.syntheticUtilityDatabase.syntheticUtilityDatabase.maxUtilRange"]], "numofitems (pami.extras.syntheticdatagenerator.syntheticutilitydatabase.syntheticutilitydatabase attribute)": [[20, "PAMI.extras.syntheticDataGenerator.syntheticUtilityDatabase.syntheticUtilityDatabase.numOfItems"]], "num_of_items (pami.extras.syntheticdatagenerator.temporaldatabasegen.createsynthetictemporal attribute)": [[20, "PAMI.extras.syntheticDataGenerator.temporalDatabaseGen.CreateSyntheticTemporal.num_of_items"]], "performcoinflip() (pami.extras.syntheticdatagenerator.temporaldatabase.temporaldatabase method)": [[20, "PAMI.extras.syntheticDataGenerator.TemporalDatabase.TemporalDatabase.performCoinFlip"]], "save() (pami.extras.syntheticdatagenerator.transactionaldatabase.transactionaldatabase method)": [[20, "PAMI.extras.syntheticDataGenerator.TransactionalDatabase.TransactionalDatabase.save"]], "save() (pami.extras.syntheticdatagenerator.generatetemporal.generatetemporal method)": [[20, "PAMI.extras.syntheticDataGenerator.generateTemporal.generateTemporal.save"]], "save() (pami.extras.syntheticdatagenerator.generatetransactional.generatetransactional method)": [[20, "PAMI.extras.syntheticDataGenerator.generateTransactional.generateTransactional.save"]], "save() (pami.extras.syntheticdatagenerator.generateuncertaintemporal.generateuncertaintemporal method)": [[20, "PAMI.extras.syntheticDataGenerator.generateUncertainTemporal.generateUncertainTemporal.save"]], "save() (pami.extras.syntheticdatagenerator.generateuncertaintransactional.generateuncertaintransactional method)": [[20, "PAMI.extras.syntheticDataGenerator.generateUncertainTransactional.generateUncertainTransactional.save"]], "save() (pami.extras.syntheticdatagenerator.generateutilitytemporal.generateutilitytemporal method)": [[20, "PAMI.extras.syntheticDataGenerator.generateUtilityTemporal.generateUtilityTemporal.save"]], "save() 
(pami.extras.syntheticdatagenerator.generateutilitytransactional.generateutilitytransactional method)": [[20, "PAMI.extras.syntheticDataGenerator.generateUtilityTransactional.generateUtilityTransactional.save"]], "save() (pami.extras.syntheticdatagenerator.syntheticutilitydatabase.syntheticutilitydatabase method)": [[20, "PAMI.extras.syntheticDataGenerator.syntheticUtilityDatabase.syntheticUtilityDatabase.save"], [20, "id11"]], "syntheticutilitydatabase (class in pami.extras.syntheticdatagenerator.syntheticutilitydatabase)": [[20, "PAMI.extras.syntheticDataGenerator.syntheticUtilityDatabase.syntheticUtilityDatabase"]], "totaltransactions (pami.extras.syntheticdatagenerator.syntheticutilitydatabase.syntheticutilitydatabase attribute)": [[20, "PAMI.extras.syntheticDataGenerator.syntheticUtilityDatabase.syntheticUtilityDatabase.totalTransactions"]], "total_transactions (pami.extras.syntheticdatagenerator.temporaldatabasegen.createsynthetictemporal attribute)": [[20, "PAMI.extras.syntheticDataGenerator.temporalDatabaseGen.CreateSyntheticTemporal.total_transactions"]], "tuning() (pami.extras.syntheticdatagenerator.temporaldatabase.temporaldatabase method)": [[20, "PAMI.extras.syntheticDataGenerator.TemporalDatabase.TemporalDatabase.tuning"]], "tuning() (pami.extras.syntheticdatagenerator.transactionaldatabase.transactionaldatabase method)": [[20, "PAMI.extras.syntheticDataGenerator.TransactionalDatabase.TransactionalDatabase.tuning"]], "pami.extras.visualize": [[21, "module-PAMI.extras.visualize"]], "pami.extras.visualize.graphs": [[21, "module-PAMI.extras.visualize.graphs"]], "graphdatabase (class in pami.extras.visualize.graphs)": [[21, "PAMI.extras.visualize.graphs.graphDatabase"]], "plot() (pami.extras.visualize.graphs.graphdatabase method)": [[21, "PAMI.extras.visualize.graphs.graphDatabase.plot"]], "pami.faulttolerantfrequentpattern": [[22, "module-PAMI.faultTolerantFrequentPattern"]], "ftapriori (class in pami.faulttolerantfrequentpattern.basic.ftapriori)": [[23, "PAMI.faultTolerantFrequentPattern.basic.FTApriori.FTApriori"]], "ftfpgrowth (class in pami.faulttolerantfrequentpattern.basic.ftfpgrowth)": [[23, "PAMI.faultTolerantFrequentPattern.basic.FTFPGrowth.FTFPGrowth"]], "pami.faulttolerantfrequentpattern.basic": [[23, "module-PAMI.faultTolerantFrequentPattern.basic"]], "pami.faulttolerantfrequentpattern.basic.ftapriori": [[23, "module-PAMI.faultTolerantFrequentPattern.basic.FTApriori"]], "pami.faulttolerantfrequentpattern.basic.ftfpgrowth": [[23, "module-PAMI.faultTolerantFrequentPattern.basic.FTFPGrowth"]], "pami.faulttolerantfrequentpattern.basic.abstract": [[23, "module-PAMI.faultTolerantFrequentPattern.basic.abstract"]], "getmemoryrss() (pami.faulttolerantfrequentpattern.basic.ftapriori.ftapriori method)": [[23, "PAMI.faultTolerantFrequentPattern.basic.FTApriori.FTApriori.getMemoryRSS"]], "getmemoryrss() (pami.faulttolerantfrequentpattern.basic.ftfpgrowth.ftfpgrowth method)": [[23, "PAMI.faultTolerantFrequentPattern.basic.FTFPGrowth.FTFPGrowth.getMemoryRSS"]], "getmemoryuss() (pami.faulttolerantfrequentpattern.basic.ftapriori.ftapriori method)": [[23, "PAMI.faultTolerantFrequentPattern.basic.FTApriori.FTApriori.getMemoryUSS"]], "getmemoryuss() (pami.faulttolerantfrequentpattern.basic.ftfpgrowth.ftfpgrowth method)": [[23, "PAMI.faultTolerantFrequentPattern.basic.FTFPGrowth.FTFPGrowth.getMemoryUSS"]], "getpatterns() (pami.faulttolerantfrequentpattern.basic.ftapriori.ftapriori method)": [[23, "PAMI.faultTolerantFrequentPattern.basic.FTApriori.FTApriori.getPatterns"]], "getpatterns() 
(pami.faulttolerantfrequentpattern.basic.ftfpgrowth.ftfpgrowth method)": [[23, "PAMI.faultTolerantFrequentPattern.basic.FTFPGrowth.FTFPGrowth.getPatterns"]], "getpatternsasdataframe() (pami.faulttolerantfrequentpattern.basic.ftapriori.ftapriori method)": [[23, "PAMI.faultTolerantFrequentPattern.basic.FTApriori.FTApriori.getPatternsAsDataFrame"]], "getpatternsasdataframe() (pami.faulttolerantfrequentpattern.basic.ftfpgrowth.ftfpgrowth method)": [[23, "PAMI.faultTolerantFrequentPattern.basic.FTFPGrowth.FTFPGrowth.getPatternsAsDataFrame"]], "getruntime() (pami.faulttolerantfrequentpattern.basic.ftapriori.ftapriori method)": [[23, "PAMI.faultTolerantFrequentPattern.basic.FTApriori.FTApriori.getRuntime"]], "getruntime() (pami.faulttolerantfrequentpattern.basic.ftfpgrowth.ftfpgrowth method)": [[23, "PAMI.faultTolerantFrequentPattern.basic.FTFPGrowth.FTFPGrowth.getRuntime"]], "mine() (pami.faulttolerantfrequentpattern.basic.ftapriori.ftapriori method)": [[23, "PAMI.faultTolerantFrequentPattern.basic.FTApriori.FTApriori.mine"]], "mine() (pami.faulttolerantfrequentpattern.basic.ftfpgrowth.ftfpgrowth method)": [[23, "PAMI.faultTolerantFrequentPattern.basic.FTFPGrowth.FTFPGrowth.mine"]], "printresults() (pami.faulttolerantfrequentpattern.basic.ftapriori.ftapriori method)": [[23, "PAMI.faultTolerantFrequentPattern.basic.FTApriori.FTApriori.printResults"]], "printresults() (pami.faulttolerantfrequentpattern.basic.ftfpgrowth.ftfpgrowth method)": [[23, "PAMI.faultTolerantFrequentPattern.basic.FTFPGrowth.FTFPGrowth.printResults"]], "save() (pami.faulttolerantfrequentpattern.basic.ftapriori.ftapriori method)": [[23, "PAMI.faultTolerantFrequentPattern.basic.FTApriori.FTApriori.save"]], "save() (pami.faulttolerantfrequentpattern.basic.ftfpgrowth.ftfpgrowth method)": [[23, "PAMI.faultTolerantFrequentPattern.basic.FTFPGrowth.FTFPGrowth.save"]], "startmine() (pami.faulttolerantfrequentpattern.basic.ftapriori.ftapriori method)": [[23, "PAMI.faultTolerantFrequentPattern.basic.FTApriori.FTApriori.startMine"]], "startmine() (pami.faulttolerantfrequentpattern.basic.ftfpgrowth.ftfpgrowth method)": [[23, "PAMI.faultTolerantFrequentPattern.basic.FTFPGrowth.FTFPGrowth.startMine"]], "pami.frequentpattern": [[24, "module-PAMI.frequentPattern"]], "apriori (class in pami.frequentpattern.basic.apriori)": [[25, "PAMI.frequentPattern.basic.Apriori.Apriori"]], "eclat (class in pami.frequentpattern.basic.eclat)": [[25, "PAMI.frequentPattern.basic.ECLAT.ECLAT"]], "eclatdiffset (class in pami.frequentpattern.basic.eclatdiffset)": [[25, "PAMI.frequentPattern.basic.ECLATDiffset.ECLATDiffset"]], "eclatbitset (class in pami.frequentpattern.basic.eclatbitset)": [[25, "PAMI.frequentPattern.basic.ECLATbitset.ECLATbitset"]], "fpgrowth (class in pami.frequentpattern.basic.fpgrowth)": [[25, "PAMI.frequentPattern.basic.FPGrowth.FPGrowth"]], "pami.frequentpattern.basic": [[25, "module-PAMI.frequentPattern.basic"]], "pami.frequentpattern.basic.apriori": [[25, "module-PAMI.frequentPattern.basic.Apriori"]], "pami.frequentpattern.basic.eclat": [[25, "module-PAMI.frequentPattern.basic.ECLAT"]], "pami.frequentpattern.basic.eclatdiffset": [[25, "module-PAMI.frequentPattern.basic.ECLATDiffset"]], "pami.frequentpattern.basic.eclatbitset": [[25, "module-PAMI.frequentPattern.basic.ECLATbitset"]], "pami.frequentpattern.basic.fpgrowth": [[25, "module-PAMI.frequentPattern.basic.FPGrowth"]], "pami.frequentpattern.basic.abstract": [[25, "module-PAMI.frequentPattern.basic.abstract"]], "getmemoryrss() (pami.frequentpattern.basic.apriori.apriori method)": [[25, 
"PAMI.frequentPattern.basic.Apriori.Apriori.getMemoryRSS"]], "getmemoryrss() (pami.frequentpattern.basic.eclat.eclat method)": [[25, "PAMI.frequentPattern.basic.ECLAT.ECLAT.getMemoryRSS"]], "getmemoryrss() (pami.frequentpattern.basic.eclatdiffset.eclatdiffset method)": [[25, "PAMI.frequentPattern.basic.ECLATDiffset.ECLATDiffset.getMemoryRSS"]], "getmemoryrss() (pami.frequentpattern.basic.eclatbitset.eclatbitset method)": [[25, "PAMI.frequentPattern.basic.ECLATbitset.ECLATbitset.getMemoryRSS"]], "getmemoryrss() (pami.frequentpattern.basic.fpgrowth.fpgrowth method)": [[25, "PAMI.frequentPattern.basic.FPGrowth.FPGrowth.getMemoryRSS"]], "getmemoryuss() (pami.frequentpattern.basic.apriori.apriori method)": [[25, "PAMI.frequentPattern.basic.Apriori.Apriori.getMemoryUSS"]], "getmemoryuss() (pami.frequentpattern.basic.eclat.eclat method)": [[25, "PAMI.frequentPattern.basic.ECLAT.ECLAT.getMemoryUSS"]], "getmemoryuss() (pami.frequentpattern.basic.eclatdiffset.eclatdiffset method)": [[25, "PAMI.frequentPattern.basic.ECLATDiffset.ECLATDiffset.getMemoryUSS"]], "getmemoryuss() (pami.frequentpattern.basic.eclatbitset.eclatbitset method)": [[25, "PAMI.frequentPattern.basic.ECLATbitset.ECLATbitset.getMemoryUSS"]], "getmemoryuss() (pami.frequentpattern.basic.fpgrowth.fpgrowth method)": [[25, "PAMI.frequentPattern.basic.FPGrowth.FPGrowth.getMemoryUSS"]], "getpatterns() (pami.frequentpattern.basic.apriori.apriori method)": [[25, "PAMI.frequentPattern.basic.Apriori.Apriori.getPatterns"]], "getpatterns() (pami.frequentpattern.basic.eclat.eclat method)": [[25, "PAMI.frequentPattern.basic.ECLAT.ECLAT.getPatterns"]], "getpatterns() (pami.frequentpattern.basic.eclatdiffset.eclatdiffset method)": [[25, "PAMI.frequentPattern.basic.ECLATDiffset.ECLATDiffset.getPatterns"]], "getpatterns() (pami.frequentpattern.basic.eclatbitset.eclatbitset method)": [[25, "PAMI.frequentPattern.basic.ECLATbitset.ECLATbitset.getPatterns"]], "getpatterns() (pami.frequentpattern.basic.fpgrowth.fpgrowth method)": [[25, "PAMI.frequentPattern.basic.FPGrowth.FPGrowth.getPatterns"]], "getpatternsasdataframe() (pami.frequentpattern.basic.apriori.apriori method)": [[25, "PAMI.frequentPattern.basic.Apriori.Apriori.getPatternsAsDataFrame"]], "getpatternsasdataframe() (pami.frequentpattern.basic.eclat.eclat method)": [[25, "PAMI.frequentPattern.basic.ECLAT.ECLAT.getPatternsAsDataFrame"]], "getpatternsasdataframe() (pami.frequentpattern.basic.eclatdiffset.eclatdiffset method)": [[25, "PAMI.frequentPattern.basic.ECLATDiffset.ECLATDiffset.getPatternsAsDataFrame"]], "getpatternsasdataframe() (pami.frequentpattern.basic.eclatbitset.eclatbitset method)": [[25, "PAMI.frequentPattern.basic.ECLATbitset.ECLATbitset.getPatternsAsDataFrame"]], "getpatternsasdataframe() (pami.frequentpattern.basic.fpgrowth.fpgrowth method)": [[25, "PAMI.frequentPattern.basic.FPGrowth.FPGrowth.getPatternsAsDataFrame"]], "getruntime() (pami.frequentpattern.basic.apriori.apriori method)": [[25, "PAMI.frequentPattern.basic.Apriori.Apriori.getRuntime"]], "getruntime() (pami.frequentpattern.basic.eclat.eclat method)": [[25, "PAMI.frequentPattern.basic.ECLAT.ECLAT.getRuntime"]], "getruntime() (pami.frequentpattern.basic.eclatdiffset.eclatdiffset method)": [[25, "PAMI.frequentPattern.basic.ECLATDiffset.ECLATDiffset.getRuntime"]], "getruntime() (pami.frequentpattern.basic.eclatbitset.eclatbitset method)": [[25, "PAMI.frequentPattern.basic.ECLATbitset.ECLATbitset.getRuntime"]], "getruntime() (pami.frequentpattern.basic.fpgrowth.fpgrowth method)": [[25, 
"PAMI.frequentPattern.basic.FPGrowth.FPGrowth.getRuntime"]], "mine() (pami.frequentpattern.basic.apriori.apriori method)": [[25, "PAMI.frequentPattern.basic.Apriori.Apriori.mine"]], "mine() (pami.frequentpattern.basic.eclat.eclat method)": [[25, "PAMI.frequentPattern.basic.ECLAT.ECLAT.mine"]], "mine() (pami.frequentpattern.basic.eclatdiffset.eclatdiffset method)": [[25, "PAMI.frequentPattern.basic.ECLATDiffset.ECLATDiffset.mine"]], "mine() (pami.frequentpattern.basic.eclatbitset.eclatbitset method)": [[25, "PAMI.frequentPattern.basic.ECLATbitset.ECLATbitset.mine"]], "mine() (pami.frequentpattern.basic.fpgrowth.fpgrowth method)": [[25, "PAMI.frequentPattern.basic.FPGrowth.FPGrowth.mine"]], "printresults() (pami.frequentpattern.basic.apriori.apriori method)": [[25, "PAMI.frequentPattern.basic.Apriori.Apriori.printResults"]], "printresults() (pami.frequentpattern.basic.eclat.eclat method)": [[25, "PAMI.frequentPattern.basic.ECLAT.ECLAT.printResults"]], "printresults() (pami.frequentpattern.basic.eclatdiffset.eclatdiffset method)": [[25, "PAMI.frequentPattern.basic.ECLATDiffset.ECLATDiffset.printResults"]], "printresults() (pami.frequentpattern.basic.eclatbitset.eclatbitset method)": [[25, "PAMI.frequentPattern.basic.ECLATbitset.ECLATbitset.printResults"]], "printresults() (pami.frequentpattern.basic.fpgrowth.fpgrowth method)": [[25, "PAMI.frequentPattern.basic.FPGrowth.FPGrowth.printResults"]], "save() (pami.frequentpattern.basic.apriori.apriori method)": [[25, "PAMI.frequentPattern.basic.Apriori.Apriori.save"]], "save() (pami.frequentpattern.basic.eclat.eclat method)": [[25, "PAMI.frequentPattern.basic.ECLAT.ECLAT.save"]], "save() (pami.frequentpattern.basic.eclatdiffset.eclatdiffset method)": [[25, "PAMI.frequentPattern.basic.ECLATDiffset.ECLATDiffset.save"]], "save() (pami.frequentpattern.basic.eclatbitset.eclatbitset method)": [[25, "PAMI.frequentPattern.basic.ECLATbitset.ECLATbitset.save"]], "save() (pami.frequentpattern.basic.fpgrowth.fpgrowth method)": [[25, "PAMI.frequentPattern.basic.FPGrowth.FPGrowth.save"]], "startmine() (pami.frequentpattern.basic.apriori.apriori method)": [[25, "PAMI.frequentPattern.basic.Apriori.Apriori.startMine"]], "startmine() (pami.frequentpattern.basic.eclat.eclat method)": [[25, "PAMI.frequentPattern.basic.ECLAT.ECLAT.startMine"]], "startmine() (pami.frequentpattern.basic.eclatdiffset.eclatdiffset method)": [[25, "PAMI.frequentPattern.basic.ECLATDiffset.ECLATDiffset.startMine"]], "startmine() (pami.frequentpattern.basic.eclatbitset.eclatbitset method)": [[25, "PAMI.frequentPattern.basic.ECLATbitset.ECLATbitset.startMine"]], "startmine() (pami.frequentpattern.basic.fpgrowth.fpgrowth method)": [[25, "PAMI.frequentPattern.basic.FPGrowth.FPGrowth.startMine"]], "charm (class in pami.frequentpattern.closed.charm)": [[26, "PAMI.frequentPattern.closed.CHARM.CHARM"]], "pami.frequentpattern.closed": [[26, "module-PAMI.frequentPattern.closed"]], "pami.frequentpattern.closed.charm": [[26, "module-PAMI.frequentPattern.closed.CHARM"]], "pami.frequentpattern.closed.abstract": [[26, "module-PAMI.frequentPattern.closed.abstract"]], "getmemoryrss() (pami.frequentpattern.closed.charm.charm method)": [[26, "PAMI.frequentPattern.closed.CHARM.CHARM.getMemoryRSS"]], "getmemoryuss() (pami.frequentpattern.closed.charm.charm method)": [[26, "PAMI.frequentPattern.closed.CHARM.CHARM.getMemoryUSS"]], "getpatterns() (pami.frequentpattern.closed.charm.charm method)": [[26, "PAMI.frequentPattern.closed.CHARM.CHARM.getPatterns"]], "getpatternsasdataframe() 
(pami.frequentpattern.closed.charm.charm method)": [[26, "PAMI.frequentPattern.closed.CHARM.CHARM.getPatternsAsDataFrame"]], "getruntime() (pami.frequentpattern.closed.charm.charm method)": [[26, "PAMI.frequentPattern.closed.CHARM.CHARM.getRuntime"]], "mine() (pami.frequentpattern.closed.charm.charm method)": [[26, "PAMI.frequentPattern.closed.CHARM.CHARM.mine"]], "printresults() (pami.frequentpattern.closed.charm.charm method)": [[26, "PAMI.frequentPattern.closed.CHARM.CHARM.printResults"]], "save() (pami.frequentpattern.closed.charm.charm method)": [[26, "PAMI.frequentPattern.closed.CHARM.CHARM.save"]], "startmine() (pami.frequentpattern.closed.charm.charm method)": [[26, "PAMI.frequentPattern.closed.CHARM.CHARM.startMine"]], "pami.frequentpattern.cuda": [[27, "module-PAMI.frequentPattern.cuda"]], "maxfpgrowth (class in pami.frequentpattern.maximal.maxfpgrowth)": [[28, "PAMI.frequentPattern.maximal.MaxFPGrowth.MaxFPGrowth"]], "pami.frequentpattern.maximal": [[28, "module-PAMI.frequentPattern.maximal"]], "pami.frequentpattern.maximal.maxfpgrowth": [[28, "module-PAMI.frequentPattern.maximal.MaxFPGrowth"]], "pami.frequentpattern.maximal.abstract": [[28, "module-PAMI.frequentPattern.maximal.abstract"]], "getmemoryrss() (pami.frequentpattern.maximal.maxfpgrowth.maxfpgrowth method)": [[28, "PAMI.frequentPattern.maximal.MaxFPGrowth.MaxFPGrowth.getMemoryRSS"]], "getmemoryuss() (pami.frequentpattern.maximal.maxfpgrowth.maxfpgrowth method)": [[28, "PAMI.frequentPattern.maximal.MaxFPGrowth.MaxFPGrowth.getMemoryUSS"]], "getpatterns() (pami.frequentpattern.maximal.maxfpgrowth.maxfpgrowth method)": [[28, "PAMI.frequentPattern.maximal.MaxFPGrowth.MaxFPGrowth.getPatterns"]], "getpatternsasdataframe() (pami.frequentpattern.maximal.maxfpgrowth.maxfpgrowth method)": [[28, "PAMI.frequentPattern.maximal.MaxFPGrowth.MaxFPGrowth.getPatternsAsDataFrame"]], "getruntime() (pami.frequentpattern.maximal.maxfpgrowth.maxfpgrowth method)": [[28, "PAMI.frequentPattern.maximal.MaxFPGrowth.MaxFPGrowth.getRuntime"]], "mine() (pami.frequentpattern.maximal.maxfpgrowth.maxfpgrowth method)": [[28, "PAMI.frequentPattern.maximal.MaxFPGrowth.MaxFPGrowth.mine"]], "printresults() (pami.frequentpattern.maximal.maxfpgrowth.maxfpgrowth method)": [[28, "PAMI.frequentPattern.maximal.MaxFPGrowth.MaxFPGrowth.printResults"]], "save() (pami.frequentpattern.maximal.maxfpgrowth.maxfpgrowth method)": [[28, "PAMI.frequentPattern.maximal.MaxFPGrowth.MaxFPGrowth.save"]], "startmine() (pami.frequentpattern.maximal.maxfpgrowth.maxfpgrowth method)": [[28, "PAMI.frequentPattern.maximal.MaxFPGrowth.MaxFPGrowth.startMine"]], "pami.frequentpattern.pyspark": [[29, "module-PAMI.frequentPattern.pyspark"]], "fae (class in pami.frequentpattern.topk.fae)": [[30, "PAMI.frequentPattern.topk.FAE.FAE"]], "pami.frequentpattern.topk": [[30, "module-PAMI.frequentPattern.topk"]], "pami.frequentpattern.topk.fae": [[30, "module-PAMI.frequentPattern.topk.FAE"]], "pami.frequentpattern.topk.abstract": [[30, "module-PAMI.frequentPattern.topk.abstract"]], "getmemoryrss() (pami.frequentpattern.topk.fae.fae method)": [[30, "PAMI.frequentPattern.topk.FAE.FAE.getMemoryRSS"]], "getmemoryuss() (pami.frequentpattern.topk.fae.fae method)": [[30, "PAMI.frequentPattern.topk.FAE.FAE.getMemoryUSS"]], "getpatterns() (pami.frequentpattern.topk.fae.fae method)": [[30, "PAMI.frequentPattern.topk.FAE.FAE.getPatterns"]], "getpatternsasdataframe() (pami.frequentpattern.topk.fae.fae method)": [[30, "PAMI.frequentPattern.topk.FAE.FAE.getPatternsAsDataFrame"]], "getruntime() 
(pami.frequentpattern.topk.fae.fae method)": [[30, "PAMI.frequentPattern.topk.FAE.FAE.getRuntime"]], "mine() (pami.frequentpattern.topk.fae.fae method)": [[30, "PAMI.frequentPattern.topk.FAE.FAE.mine"]], "printtopk() (pami.frequentpattern.topk.fae.fae method)": [[30, "PAMI.frequentPattern.topk.FAE.FAE.printTOPK"]], "save() (pami.frequentpattern.topk.fae.fae method)": [[30, "PAMI.frequentPattern.topk.FAE.FAE.save"]], "startmine() (pami.frequentpattern.topk.fae.fae method)": [[30, "PAMI.frequentPattern.topk.FAE.FAE.startMine"]], "pami.fuzzycorrelatedpattern": [[31, "module-PAMI.fuzzyCorrelatedPattern"]], "element (class in pami.fuzzycorrelatedpattern.basic.fcpgrowth)": [[32, "PAMI.fuzzyCorrelatedPattern.basic.FCPGrowth.Element"]], "fcpgrowth (class in pami.fuzzycorrelatedpattern.basic.fcpgrowth)": [[32, "PAMI.fuzzyCorrelatedPattern.basic.FCPGrowth.FCPGrowth"]], "pami.fuzzycorrelatedpattern.basic": [[32, "module-PAMI.fuzzyCorrelatedPattern.basic"]], "pami.fuzzycorrelatedpattern.basic.fcpgrowth": [[32, "module-PAMI.fuzzyCorrelatedPattern.basic.FCPGrowth"]], "pami.fuzzycorrelatedpattern.basic.abstract": [[32, "module-PAMI.fuzzyCorrelatedPattern.basic.abstract"]], "getmemoryrss() (pami.fuzzycorrelatedpattern.basic.fcpgrowth.fcpgrowth method)": [[32, "PAMI.fuzzyCorrelatedPattern.basic.FCPGrowth.FCPGrowth.getMemoryRSS"]], "getmemoryuss() (pami.fuzzycorrelatedpattern.basic.fcpgrowth.fcpgrowth method)": [[32, "PAMI.fuzzyCorrelatedPattern.basic.FCPGrowth.FCPGrowth.getMemoryUSS"]], "getpatterns() (pami.fuzzycorrelatedpattern.basic.fcpgrowth.fcpgrowth method)": [[32, "PAMI.fuzzyCorrelatedPattern.basic.FCPGrowth.FCPGrowth.getPatterns"]], "getpatternsasdataframe() (pami.fuzzycorrelatedpattern.basic.fcpgrowth.fcpgrowth method)": [[32, "PAMI.fuzzyCorrelatedPattern.basic.FCPGrowth.FCPGrowth.getPatternsAsDataFrame"]], "getruntime() (pami.fuzzycorrelatedpattern.basic.fcpgrowth.fcpgrowth method)": [[32, "PAMI.fuzzyCorrelatedPattern.basic.FCPGrowth.FCPGrowth.getRuntime"]], "main() (in module pami.fuzzycorrelatedpattern.basic.fcpgrowth)": [[32, "PAMI.fuzzyCorrelatedPattern.basic.FCPGrowth.main"]], "mine() (pami.fuzzycorrelatedpattern.basic.fcpgrowth.fcpgrowth method)": [[32, "PAMI.fuzzyCorrelatedPattern.basic.FCPGrowth.FCPGrowth.mine"]], "printresults() (pami.fuzzycorrelatedpattern.basic.fcpgrowth.fcpgrowth method)": [[32, "PAMI.fuzzyCorrelatedPattern.basic.FCPGrowth.FCPGrowth.printResults"]], "save() (pami.fuzzycorrelatedpattern.basic.fcpgrowth.fcpgrowth method)": [[32, "PAMI.fuzzyCorrelatedPattern.basic.FCPGrowth.FCPGrowth.save"]], "startmine() (pami.fuzzycorrelatedpattern.basic.fcpgrowth.fcpgrowth method)": [[32, "PAMI.fuzzyCorrelatedPattern.basic.FCPGrowth.FCPGrowth.startMine"]], "pami.fuzzyfrequentpattern": [[33, "module-PAMI.fuzzyFrequentPattern"]], "ffiminer (class in pami.fuzzyfrequentpattern.basic.ffiminer)": [[34, "PAMI.fuzzyFrequentPattern.basic.FFIMiner.FFIMiner"]], "ffiminer (class in pami.fuzzyfrequentpattern.basic.ffiminer_old)": [[34, "PAMI.fuzzyFrequentPattern.basic.FFIMiner_old.FFIMiner"]], "pami.fuzzyfrequentpattern.basic": [[34, "module-PAMI.fuzzyFrequentPattern.basic"]], "pami.fuzzyfrequentpattern.basic.ffiminer": [[34, "module-PAMI.fuzzyFrequentPattern.basic.FFIMiner"]], "pami.fuzzyfrequentpattern.basic.ffiminer_old": [[34, "module-PAMI.fuzzyFrequentPattern.basic.FFIMiner_old"]], "pami.fuzzyfrequentpattern.basic.abstract": [[34, "module-PAMI.fuzzyFrequentPattern.basic.abstract"]], "getmemoryrss() (pami.fuzzyfrequentpattern.basic.ffiminer.ffiminer method)": [[34, 
"PAMI.fuzzyFrequentPattern.basic.FFIMiner.FFIMiner.getMemoryRSS"]], "getmemoryrss() (pami.fuzzyfrequentpattern.basic.ffiminer_old.ffiminer method)": [[34, "PAMI.fuzzyFrequentPattern.basic.FFIMiner_old.FFIMiner.getMemoryRSS"]], "getmemoryuss() (pami.fuzzyfrequentpattern.basic.ffiminer.ffiminer method)": [[34, "PAMI.fuzzyFrequentPattern.basic.FFIMiner.FFIMiner.getMemoryUSS"]], "getmemoryuss() (pami.fuzzyfrequentpattern.basic.ffiminer_old.ffiminer method)": [[34, "PAMI.fuzzyFrequentPattern.basic.FFIMiner_old.FFIMiner.getMemoryUSS"]], "getpatterns() (pami.fuzzyfrequentpattern.basic.ffiminer.ffiminer method)": [[34, "PAMI.fuzzyFrequentPattern.basic.FFIMiner.FFIMiner.getPatterns"]], "getpatterns() (pami.fuzzyfrequentpattern.basic.ffiminer_old.ffiminer method)": [[34, "PAMI.fuzzyFrequentPattern.basic.FFIMiner_old.FFIMiner.getPatterns"]], "getpatternsasdataframe() (pami.fuzzyfrequentpattern.basic.ffiminer.ffiminer method)": [[34, "PAMI.fuzzyFrequentPattern.basic.FFIMiner.FFIMiner.getPatternsAsDataFrame"]], "getpatternsasdataframe() (pami.fuzzyfrequentpattern.basic.ffiminer_old.ffiminer method)": [[34, "PAMI.fuzzyFrequentPattern.basic.FFIMiner_old.FFIMiner.getPatternsAsDataFrame"]], "getruntime() (pami.fuzzyfrequentpattern.basic.ffiminer.ffiminer method)": [[34, "PAMI.fuzzyFrequentPattern.basic.FFIMiner.FFIMiner.getRuntime"]], "getruntime() (pami.fuzzyfrequentpattern.basic.ffiminer_old.ffiminer method)": [[34, "PAMI.fuzzyFrequentPattern.basic.FFIMiner_old.FFIMiner.getRuntime"]], "mine() (pami.fuzzyfrequentpattern.basic.ffiminer.ffiminer method)": [[34, "PAMI.fuzzyFrequentPattern.basic.FFIMiner.FFIMiner.mine"]], "mine() (pami.fuzzyfrequentpattern.basic.ffiminer_old.ffiminer method)": [[34, "PAMI.fuzzyFrequentPattern.basic.FFIMiner_old.FFIMiner.mine"]], "printresults() (pami.fuzzyfrequentpattern.basic.ffiminer.ffiminer method)": [[34, "PAMI.fuzzyFrequentPattern.basic.FFIMiner.FFIMiner.printResults"]], "printresults() (pami.fuzzyfrequentpattern.basic.ffiminer_old.ffiminer method)": [[34, "PAMI.fuzzyFrequentPattern.basic.FFIMiner_old.FFIMiner.printResults"]], "save() (pami.fuzzyfrequentpattern.basic.ffiminer.ffiminer method)": [[34, "PAMI.fuzzyFrequentPattern.basic.FFIMiner.FFIMiner.save"]], "save() (pami.fuzzyfrequentpattern.basic.ffiminer_old.ffiminer method)": [[34, "PAMI.fuzzyFrequentPattern.basic.FFIMiner_old.FFIMiner.save"]], "startmine() (pami.fuzzyfrequentpattern.basic.ffiminer.ffiminer method)": [[34, "PAMI.fuzzyFrequentPattern.basic.FFIMiner.FFIMiner.startMine"]], "startmine() (pami.fuzzyfrequentpattern.basic.ffiminer_old.ffiminer method)": [[34, "PAMI.fuzzyFrequentPattern.basic.FFIMiner_old.FFIMiner.startMine"]], "pami.fuzzygeoreferencedfrequentpattern": [[35, "module-PAMI.fuzzyGeoreferencedFrequentPattern"]], "ffspminer (class in pami.fuzzygeoreferencedfrequentpattern.basic.ffspminer)": [[36, "PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner.FFSPMiner"]], "ffspminer (class in pami.fuzzygeoreferencedfrequentpattern.basic.ffspminer_old)": [[36, "PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner_old.FFSPMiner"]], "pami.fuzzygeoreferencedfrequentpattern.basic": [[36, "module-PAMI.fuzzyGeoreferencedFrequentPattern.basic"]], "pami.fuzzygeoreferencedfrequentpattern.basic.ffspminer": [[36, "module-PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner"]], "pami.fuzzygeoreferencedfrequentpattern.basic.ffspminer_old": [[36, "module-PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner_old"]], "pami.fuzzygeoreferencedfrequentpattern.basic.abstract": [[36, 
"module-PAMI.fuzzyGeoreferencedFrequentPattern.basic.abstract"]], "getmemoryrss() (pami.fuzzygeoreferencedfrequentpattern.basic.ffspminer.ffspminer method)": [[36, "PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner.FFSPMiner.getMemoryRSS"]], "getmemoryrss() (pami.fuzzygeoreferencedfrequentpattern.basic.ffspminer_old.ffspminer method)": [[36, "PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner_old.FFSPMiner.getMemoryRSS"]], "getmemoryuss() (pami.fuzzygeoreferencedfrequentpattern.basic.ffspminer.ffspminer method)": [[36, "PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner.FFSPMiner.getMemoryUSS"]], "getmemoryuss() (pami.fuzzygeoreferencedfrequentpattern.basic.ffspminer_old.ffspminer method)": [[36, "PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner_old.FFSPMiner.getMemoryUSS"]], "getpatterns() (pami.fuzzygeoreferencedfrequentpattern.basic.ffspminer.ffspminer method)": [[36, "PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner.FFSPMiner.getPatterns"]], "getpatterns() (pami.fuzzygeoreferencedfrequentpattern.basic.ffspminer_old.ffspminer method)": [[36, "PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner_old.FFSPMiner.getPatterns"]], "getpatternsasdataframe() (pami.fuzzygeoreferencedfrequentpattern.basic.ffspminer.ffspminer method)": [[36, "PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner.FFSPMiner.getPatternsAsDataFrame"]], "getpatternsasdataframe() (pami.fuzzygeoreferencedfrequentpattern.basic.ffspminer_old.ffspminer method)": [[36, "PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner_old.FFSPMiner.getPatternsAsDataFrame"]], "getruntime() (pami.fuzzygeoreferencedfrequentpattern.basic.ffspminer.ffspminer method)": [[36, "PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner.FFSPMiner.getRuntime"]], "getruntime() (pami.fuzzygeoreferencedfrequentpattern.basic.ffspminer_old.ffspminer method)": [[36, "PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner_old.FFSPMiner.getRuntime"]], "mine() (pami.fuzzygeoreferencedfrequentpattern.basic.ffspminer.ffspminer method)": [[36, "PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner.FFSPMiner.mine"]], "mine() (pami.fuzzygeoreferencedfrequentpattern.basic.ffspminer_old.ffspminer method)": [[36, "PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner_old.FFSPMiner.mine"]], "printresults() (pami.fuzzygeoreferencedfrequentpattern.basic.ffspminer.ffspminer method)": [[36, "PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner.FFSPMiner.printResults"]], "printresults() (pami.fuzzygeoreferencedfrequentpattern.basic.ffspminer_old.ffspminer method)": [[36, "PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner_old.FFSPMiner.printResults"]], "save() (pami.fuzzygeoreferencedfrequentpattern.basic.ffspminer.ffspminer method)": [[36, "PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner.FFSPMiner.save"]], "save() (pami.fuzzygeoreferencedfrequentpattern.basic.ffspminer_old.ffspminer method)": [[36, "PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner_old.FFSPMiner.save"]], "startmine() (pami.fuzzygeoreferencedfrequentpattern.basic.ffspminer.ffspminer method)": [[36, "PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner.FFSPMiner.startMine"]], "startmine() (pami.fuzzygeoreferencedfrequentpattern.basic.ffspminer_old.ffspminer method)": [[36, "PAMI.fuzzyGeoreferencedFrequentPattern.basic.FFSPMiner_old.FFSPMiner.startMine"]], "pami.fuzzygeoreferencedperiodicfrequentpattern": [[37, "module-PAMI.fuzzyGeoreferencedPeriodicFrequentPattern"]], "fgpfpminer (class in pami.fuzzygeoreferencedperiodicfrequentpattern.basic.fgpfpminer)": 
[[38, "PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner.FGPFPMiner"]], "fgpfpminer (class in pami.fuzzygeoreferencedperiodicfrequentpattern.basic.fgpfpminer_old)": [[38, "PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner_old.FGPFPMiner"]], "pami.fuzzygeoreferencedperiodicfrequentpattern.basic": [[38, "module-PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic"]], "pami.fuzzygeoreferencedperiodicfrequentpattern.basic.fgpfpminer": [[38, "module-PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner"]], "pami.fuzzygeoreferencedperiodicfrequentpattern.basic.fgpfpminer_old": [[38, "module-PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner_old"]], "pami.fuzzygeoreferencedperiodicfrequentpattern.basic.abstract": [[38, "module-PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.abstract"]], "generategraphs() (pami.fuzzygeoreferencedperiodicfrequentpattern.basic.fgpfpminer_old.fgpfpminer method)": [[38, "PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner_old.FGPFPMiner.generateGraphs"]], "generatelatexcode() (pami.fuzzygeoreferencedperiodicfrequentpattern.basic.fgpfpminer_old.fgpfpminer method)": [[38, "PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner_old.FGPFPMiner.generateLatexCode"]], "getmemoryrss() (pami.fuzzygeoreferencedperiodicfrequentpattern.basic.fgpfpminer.fgpfpminer method)": [[38, "PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner.FGPFPMiner.getMemoryRSS"]], "getmemoryrss() (pami.fuzzygeoreferencedperiodicfrequentpattern.basic.fgpfpminer_old.fgpfpminer method)": [[38, "PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner_old.FGPFPMiner.getMemoryRSS"]], "getmemoryuss() (pami.fuzzygeoreferencedperiodicfrequentpattern.basic.fgpfpminer.fgpfpminer method)": [[38, "PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner.FGPFPMiner.getMemoryUSS"]], "getmemoryuss() (pami.fuzzygeoreferencedperiodicfrequentpattern.basic.fgpfpminer_old.fgpfpminer method)": [[38, "PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner_old.FGPFPMiner.getMemoryUSS"]], "getpatterns() (pami.fuzzygeoreferencedperiodicfrequentpattern.basic.fgpfpminer.fgpfpminer method)": [[38, "PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner.FGPFPMiner.getPatterns"]], "getpatterns() (pami.fuzzygeoreferencedperiodicfrequentpattern.basic.fgpfpminer_old.fgpfpminer method)": [[38, "PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner_old.FGPFPMiner.getPatterns"]], "getpatternsasdataframe() (pami.fuzzygeoreferencedperiodicfrequentpattern.basic.fgpfpminer.fgpfpminer method)": [[38, "PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner.FGPFPMiner.getPatternsAsDataFrame"]], "getpatternsasdataframe() (pami.fuzzygeoreferencedperiodicfrequentpattern.basic.fgpfpminer_old.fgpfpminer method)": [[38, "PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner_old.FGPFPMiner.getPatternsAsDataFrame"], [38, "PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner_old.FGPFPMiner.getPatternsAsDataframe"]], "getruntime() (pami.fuzzygeoreferencedperiodicfrequentpattern.basic.fgpfpminer.fgpfpminer method)": [[38, "PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner.FGPFPMiner.getRuntime"]], "getruntime() (pami.fuzzygeoreferencedperiodicfrequentpattern.basic.fgpfpminer_old.fgpfpminer method)": [[38, "PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner_old.FGPFPMiner.getRuntime"]], "mine() (pami.fuzzygeoreferencedperiodicfrequentpattern.basic.fgpfpminer.fgpfpminer 
method)": [[38, "PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner.FGPFPMiner.mine"]], "mine() (pami.fuzzygeoreferencedperiodicfrequentpattern.basic.fgpfpminer_old.fgpfpminer method)": [[38, "PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner_old.FGPFPMiner.mine"]], "printresults() (pami.fuzzygeoreferencedperiodicfrequentpattern.basic.fgpfpminer.fgpfpminer method)": [[38, "PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner.FGPFPMiner.printResults"]], "printresults() (pami.fuzzygeoreferencedperiodicfrequentpattern.basic.fgpfpminer_old.fgpfpminer method)": [[38, "PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner_old.FGPFPMiner.printResults"]], "save() (pami.fuzzygeoreferencedperiodicfrequentpattern.basic.fgpfpminer.fgpfpminer method)": [[38, "PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner.FGPFPMiner.save"]], "save() (pami.fuzzygeoreferencedperiodicfrequentpattern.basic.fgpfpminer_old.fgpfpminer method)": [[38, "PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner_old.FGPFPMiner.save"]], "startmine() (pami.fuzzygeoreferencedperiodicfrequentpattern.basic.fgpfpminer.fgpfpminer method)": [[38, "PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner.FGPFPMiner.startMine"]], "startmine() (pami.fuzzygeoreferencedperiodicfrequentpattern.basic.fgpfpminer_old.fgpfpminer method)": [[38, "PAMI.fuzzyGeoreferencedPeriodicFrequentPattern.basic.FGPFPMiner_old.FGPFPMiner.startMine"]], "pami.fuzzypartialperiodicpatterns": [[39, "module-PAMI.fuzzyPartialPeriodicPatterns"]], "f3pminer (class in pami.fuzzypartialperiodicpatterns.basic.f3pminer)": [[40, "PAMI.fuzzyPartialPeriodicPatterns.basic.F3PMiner.F3PMiner"]], "pami.fuzzypartialperiodicpatterns.basic": [[40, "module-PAMI.fuzzyPartialPeriodicPatterns.basic"]], "pami.fuzzypartialperiodicpatterns.basic.f3pminer": [[40, "module-PAMI.fuzzyPartialPeriodicPatterns.basic.F3PMiner"]], "pami.fuzzypartialperiodicpatterns.basic.abstract": [[40, "module-PAMI.fuzzyPartialPeriodicPatterns.basic.abstract"]], "getmemoryrss() (pami.fuzzypartialperiodicpatterns.basic.f3pminer.f3pminer method)": [[40, "PAMI.fuzzyPartialPeriodicPatterns.basic.F3PMiner.F3PMiner.getMemoryRSS"]], "getmemoryuss() (pami.fuzzypartialperiodicpatterns.basic.f3pminer.f3pminer method)": [[40, "PAMI.fuzzyPartialPeriodicPatterns.basic.F3PMiner.F3PMiner.getMemoryUSS"]], "getpatterns() (pami.fuzzypartialperiodicpatterns.basic.f3pminer.f3pminer method)": [[40, "PAMI.fuzzyPartialPeriodicPatterns.basic.F3PMiner.F3PMiner.getPatterns"]], "getpatternsasdataframe() (pami.fuzzypartialperiodicpatterns.basic.f3pminer.f3pminer method)": [[40, "PAMI.fuzzyPartialPeriodicPatterns.basic.F3PMiner.F3PMiner.getPatternsAsDataFrame"]], "getruntime() (pami.fuzzypartialperiodicpatterns.basic.f3pminer.f3pminer method)": [[40, "PAMI.fuzzyPartialPeriodicPatterns.basic.F3PMiner.F3PMiner.getRuntime"]], "mine() (pami.fuzzypartialperiodicpatterns.basic.f3pminer.f3pminer method)": [[40, "PAMI.fuzzyPartialPeriodicPatterns.basic.F3PMiner.F3PMiner.mine"]], "printresults() (pami.fuzzypartialperiodicpatterns.basic.f3pminer.f3pminer method)": [[40, "PAMI.fuzzyPartialPeriodicPatterns.basic.F3PMiner.F3PMiner.printResults"]], "save() (pami.fuzzypartialperiodicpatterns.basic.f3pminer.f3pminer method)": [[40, "PAMI.fuzzyPartialPeriodicPatterns.basic.F3PMiner.F3PMiner.save"]], "startmine() (pami.fuzzypartialperiodicpatterns.basic.f3pminer.f3pminer method)": [[40, "PAMI.fuzzyPartialPeriodicPatterns.basic.F3PMiner.F3PMiner.startMine"]], 
"pami.fuzzyperiodicfrequentpattern": [[41, "module-PAMI.fuzzyPeriodicFrequentPattern"]], "fpfpminer (class in pami.fuzzyperiodicfrequentpattern.basic.fpfpminer)": [[42, "PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner.FPFPMiner"]], "fpfpminer (class in pami.fuzzyperiodicfrequentpattern.basic.fpfpminer_old)": [[42, "PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner_old.FPFPMiner"]], "pami.fuzzyperiodicfrequentpattern.basic": [[42, "module-PAMI.fuzzyPeriodicFrequentPattern.basic"]], "pami.fuzzyperiodicfrequentpattern.basic.fpfpminer": [[42, "module-PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner"]], "pami.fuzzyperiodicfrequentpattern.basic.fpfpminer_old": [[42, "module-PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner_old"]], "pami.fuzzyperiodicfrequentpattern.basic.abstract": [[42, "module-PAMI.fuzzyPeriodicFrequentPattern.basic.abstract"]], "getmemoryrss() (pami.fuzzyperiodicfrequentpattern.basic.fpfpminer.fpfpminer method)": [[42, "PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner.FPFPMiner.getMemoryRSS"]], "getmemoryrss() (pami.fuzzyperiodicfrequentpattern.basic.fpfpminer_old.fpfpminer method)": [[42, "PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner_old.FPFPMiner.getMemoryRSS"]], "getmemoryuss() (pami.fuzzyperiodicfrequentpattern.basic.fpfpminer.fpfpminer method)": [[42, "PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner.FPFPMiner.getMemoryUSS"]], "getmemoryuss() (pami.fuzzyperiodicfrequentpattern.basic.fpfpminer_old.fpfpminer method)": [[42, "PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner_old.FPFPMiner.getMemoryUSS"]], "getpatterns() (pami.fuzzyperiodicfrequentpattern.basic.fpfpminer.fpfpminer method)": [[42, "PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner.FPFPMiner.getPatterns"]], "getpatterns() (pami.fuzzyperiodicfrequentpattern.basic.fpfpminer_old.fpfpminer method)": [[42, "PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner_old.FPFPMiner.getPatterns"]], "getpatternsasdataframe() (pami.fuzzyperiodicfrequentpattern.basic.fpfpminer.fpfpminer method)": [[42, "PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner.FPFPMiner.getPatternsAsDataFrame"]], "getpatternsasdataframe() (pami.fuzzyperiodicfrequentpattern.basic.fpfpminer_old.fpfpminer method)": [[42, "PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner_old.FPFPMiner.getPatternsAsDataFrame"]], "getruntime() (pami.fuzzyperiodicfrequentpattern.basic.fpfpminer.fpfpminer method)": [[42, "PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner.FPFPMiner.getRuntime"]], "getruntime() (pami.fuzzyperiodicfrequentpattern.basic.fpfpminer_old.fpfpminer method)": [[42, "PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner_old.FPFPMiner.getRuntime"]], "mine() (pami.fuzzyperiodicfrequentpattern.basic.fpfpminer.fpfpminer method)": [[42, "PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner.FPFPMiner.mine"]], "mine() (pami.fuzzyperiodicfrequentpattern.basic.fpfpminer_old.fpfpminer method)": [[42, "PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner_old.FPFPMiner.mine"]], "printresults() (pami.fuzzyperiodicfrequentpattern.basic.fpfpminer.fpfpminer method)": [[42, "PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner.FPFPMiner.printResults"]], "printresults() (pami.fuzzyperiodicfrequentpattern.basic.fpfpminer_old.fpfpminer method)": [[42, "PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner_old.FPFPMiner.printResults"]], "save() (pami.fuzzyperiodicfrequentpattern.basic.fpfpminer.fpfpminer method)": [[42, "PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner.FPFPMiner.save"]], "save() (pami.fuzzyperiodicfrequentpattern.basic.fpfpminer_old.fpfpminer method)": [[42, 
"PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner_old.FPFPMiner.save"]], "startmine() (pami.fuzzyperiodicfrequentpattern.basic.fpfpminer.fpfpminer method)": [[42, "PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner.FPFPMiner.startMine"]], "startmine() (pami.fuzzyperiodicfrequentpattern.basic.fpfpminer_old.fpfpminer method)": [[42, "PAMI.fuzzyPeriodicFrequentPattern.basic.FPFPMiner_old.FPFPMiner.startMine"]], "pami.georeferencedperiodicfrequentpattern": [[43, "module-PAMI.geoReferencedPeriodicFrequentPattern"]], "gpfpminer (class in pami.georeferencedperiodicfrequentpattern.basic.gpfpminer)": [[44, "PAMI.geoReferencedPeriodicFrequentPattern.basic.GPFPMiner.GPFPMiner"]], "pami.georeferencedperiodicfrequentpattern.basic": [[44, "module-PAMI.geoReferencedPeriodicFrequentPattern.basic"]], "pami.georeferencedperiodicfrequentpattern.basic.gpfpminer": [[44, "module-PAMI.geoReferencedPeriodicFrequentPattern.basic.GPFPMiner"]], "pami.georeferencedperiodicfrequentpattern.basic.abstract": [[44, "module-PAMI.geoReferencedPeriodicFrequentPattern.basic.abstract"]], "getmemoryrss() (pami.georeferencedperiodicfrequentpattern.basic.gpfpminer.gpfpminer method)": [[44, "PAMI.geoReferencedPeriodicFrequentPattern.basic.GPFPMiner.GPFPMiner.getMemoryRSS"]], "getmemoryuss() (pami.georeferencedperiodicfrequentpattern.basic.gpfpminer.gpfpminer method)": [[44, "PAMI.geoReferencedPeriodicFrequentPattern.basic.GPFPMiner.GPFPMiner.getMemoryUSS"]], "getpatterns() (pami.georeferencedperiodicfrequentpattern.basic.gpfpminer.gpfpminer method)": [[44, "PAMI.geoReferencedPeriodicFrequentPattern.basic.GPFPMiner.GPFPMiner.getPatterns"]], "getpatternsasdataframe() (pami.georeferencedperiodicfrequentpattern.basic.gpfpminer.gpfpminer method)": [[44, "PAMI.geoReferencedPeriodicFrequentPattern.basic.GPFPMiner.GPFPMiner.getPatternsAsDataFrame"]], "getruntime() (pami.georeferencedperiodicfrequentpattern.basic.gpfpminer.gpfpminer method)": [[44, "PAMI.geoReferencedPeriodicFrequentPattern.basic.GPFPMiner.GPFPMiner.getRuntime"]], "mapneighbours() (pami.georeferencedperiodicfrequentpattern.basic.gpfpminer.gpfpminer method)": [[44, "PAMI.geoReferencedPeriodicFrequentPattern.basic.GPFPMiner.GPFPMiner.mapNeighbours"]], "mine() (pami.georeferencedperiodicfrequentpattern.basic.gpfpminer.gpfpminer method)": [[44, "PAMI.geoReferencedPeriodicFrequentPattern.basic.GPFPMiner.GPFPMiner.mine"]], "printresults() (pami.georeferencedperiodicfrequentpattern.basic.gpfpminer.gpfpminer method)": [[44, "PAMI.geoReferencedPeriodicFrequentPattern.basic.GPFPMiner.GPFPMiner.printResults"]], "save() (pami.georeferencedperiodicfrequentpattern.basic.gpfpminer.gpfpminer method)": [[44, "PAMI.geoReferencedPeriodicFrequentPattern.basic.GPFPMiner.GPFPMiner.save"]], "startmine() (pami.georeferencedperiodicfrequentpattern.basic.gpfpminer.gpfpminer method)": [[44, "PAMI.geoReferencedPeriodicFrequentPattern.basic.GPFPMiner.GPFPMiner.startMine"]], "pami.georeferencedfrequentpattern": [[45, "module-PAMI.georeferencedFrequentPattern"]], "pami.georeferencedfrequentpattern.basic": [[46, "module-PAMI.georeferencedFrequentPattern.basic"]], "pami.georeferencedfrequentpattern.basic.spatialeclat": [[46, "module-PAMI.georeferencedFrequentPattern.basic.SpatialECLAT"]], "pami.georeferencedfrequentpattern.basic.abstract": [[46, "module-PAMI.georeferencedFrequentPattern.basic.abstract"]], "spatialeclat (class in pami.georeferencedfrequentpattern.basic.spatialeclat)": [[46, "PAMI.georeferencedFrequentPattern.basic.SpatialECLAT.SpatialECLAT"]], "getmemoryrss() 
(pami.georeferencedfrequentpattern.basic.spatialeclat.spatialeclat method)": [[46, "PAMI.georeferencedFrequentPattern.basic.SpatialECLAT.SpatialECLAT.getMemoryRSS"]], "getmemoryuss() (pami.georeferencedfrequentpattern.basic.spatialeclat.spatialeclat method)": [[46, "PAMI.georeferencedFrequentPattern.basic.SpatialECLAT.SpatialECLAT.getMemoryUSS"]], "getpatterns() (pami.georeferencedfrequentpattern.basic.spatialeclat.spatialeclat method)": [[46, "PAMI.georeferencedFrequentPattern.basic.SpatialECLAT.SpatialECLAT.getPatterns"]], "getpatternsasdataframe() (pami.georeferencedfrequentpattern.basic.spatialeclat.spatialeclat method)": [[46, "PAMI.georeferencedFrequentPattern.basic.SpatialECLAT.SpatialECLAT.getPatternsAsDataFrame"]], "getruntime() (pami.georeferencedfrequentpattern.basic.spatialeclat.spatialeclat method)": [[46, "PAMI.georeferencedFrequentPattern.basic.SpatialECLAT.SpatialECLAT.getRuntime"]], "mine() (pami.georeferencedfrequentpattern.basic.spatialeclat.spatialeclat method)": [[46, "PAMI.georeferencedFrequentPattern.basic.SpatialECLAT.SpatialECLAT.mine"]], "printresults() (pami.georeferencedfrequentpattern.basic.spatialeclat.spatialeclat method)": [[46, "PAMI.georeferencedFrequentPattern.basic.SpatialECLAT.SpatialECLAT.printResults"]], "save() (pami.georeferencedfrequentpattern.basic.spatialeclat.spatialeclat method)": [[46, "PAMI.georeferencedFrequentPattern.basic.SpatialECLAT.SpatialECLAT.save"]], "startmine() (pami.georeferencedfrequentpattern.basic.spatialeclat.spatialeclat method)": [[46, "PAMI.georeferencedFrequentPattern.basic.SpatialECLAT.SpatialECLAT.startMine"]], "pami.georeferencedfrequentsequencepattern": [[47, "module-PAMI.georeferencedFrequentSequencePattern"]], "pami.georeferencedfrequentsequencepattern.abstract": [[47, "module-PAMI.georeferencedFrequentSequencePattern.abstract"]], "pami.georeferencedpartialperiodicpattern": [[48, "module-PAMI.georeferencedPartialPeriodicPattern"]], "pami.georeferencedpartialperiodicpattern.basic": [[49, "module-PAMI.georeferencedPartialPeriodicPattern.basic"]], "pami.georeferencedpartialperiodicpattern.basic.steclat": [[49, "module-PAMI.georeferencedPartialPeriodicPattern.basic.STEclat"]], "pami.georeferencedpartialperiodicpattern.basic.abstract": [[49, "module-PAMI.georeferencedPartialPeriodicPattern.basic.abstract"]], "steclat (class in pami.georeferencedpartialperiodicpattern.basic.steclat)": [[49, "PAMI.georeferencedPartialPeriodicPattern.basic.STEclat.STEclat"]], "getmemoryrss() (pami.georeferencedpartialperiodicpattern.basic.steclat.steclat method)": [[49, "PAMI.georeferencedPartialPeriodicPattern.basic.STEclat.STEclat.getMemoryRSS"]], "getmemoryuss() (pami.georeferencedpartialperiodicpattern.basic.steclat.steclat method)": [[49, "PAMI.georeferencedPartialPeriodicPattern.basic.STEclat.STEclat.getMemoryUSS"]], "getpatterns() (pami.georeferencedpartialperiodicpattern.basic.steclat.steclat method)": [[49, "PAMI.georeferencedPartialPeriodicPattern.basic.STEclat.STEclat.getPatterns"]], "getpatternsasdataframe() (pami.georeferencedpartialperiodicpattern.basic.steclat.steclat method)": [[49, "PAMI.georeferencedPartialPeriodicPattern.basic.STEclat.STEclat.getPatternsAsDataFrame"]], "getruntime() (pami.georeferencedpartialperiodicpattern.basic.steclat.steclat method)": [[49, "PAMI.georeferencedPartialPeriodicPattern.basic.STEclat.STEclat.getRuntime"]], "mapneighbours() (pami.georeferencedpartialperiodicpattern.basic.steclat.steclat method)": [[49, "PAMI.georeferencedPartialPeriodicPattern.basic.STEclat.STEclat.mapNeighbours"]], "mine() 
(pami.georeferencedpartialperiodicpattern.basic.steclat.steclat method)": [[49, "PAMI.georeferencedPartialPeriodicPattern.basic.STEclat.STEclat.mine"]], "printresults() (pami.georeferencedpartialperiodicpattern.basic.steclat.steclat method)": [[49, "PAMI.georeferencedPartialPeriodicPattern.basic.STEclat.STEclat.printResults"]], "save() (pami.georeferencedpartialperiodicpattern.basic.steclat.steclat method)": [[49, "PAMI.georeferencedPartialPeriodicPattern.basic.STEclat.STEclat.save"]], "startmine() (pami.georeferencedpartialperiodicpattern.basic.steclat.steclat method)": [[49, "PAMI.georeferencedPartialPeriodicPattern.basic.STEclat.STEclat.startMine"]], "pami.highutilityfrequentpattern": [[50, "module-PAMI.highUtilityFrequentPattern"]], "hufim (class in pami.highutilityfrequentpattern.basic.hufim)": [[51, "PAMI.highUtilityFrequentPattern.basic.HUFIM.HUFIM"]], "pami.highutilityfrequentpattern.basic": [[51, "module-PAMI.highUtilityFrequentPattern.basic"]], "pami.highutilityfrequentpattern.basic.hufim": [[51, "module-PAMI.highUtilityFrequentPattern.basic.HUFIM"]], "pami.highutilityfrequentpattern.basic.abstract": [[51, "module-PAMI.highUtilityFrequentPattern.basic.abstract"]], "getmemoryrss() (pami.highutilityfrequentpattern.basic.hufim.hufim method)": [[51, "PAMI.highUtilityFrequentPattern.basic.HUFIM.HUFIM.getMemoryRSS"]], "getmemoryuss() (pami.highutilityfrequentpattern.basic.hufim.hufim method)": [[51, "PAMI.highUtilityFrequentPattern.basic.HUFIM.HUFIM.getMemoryUSS"]], "getpatterns() (pami.highutilityfrequentpattern.basic.hufim.hufim method)": [[51, "PAMI.highUtilityFrequentPattern.basic.HUFIM.HUFIM.getPatterns"]], "getpatternsasdataframe() (pami.highutilityfrequentpattern.basic.hufim.hufim method)": [[51, "PAMI.highUtilityFrequentPattern.basic.HUFIM.HUFIM.getPatternsAsDataFrame"]], "getruntime() (pami.highutilityfrequentpattern.basic.hufim.hufim method)": [[51, "PAMI.highUtilityFrequentPattern.basic.HUFIM.HUFIM.getRuntime"]], "mine() (pami.highutilityfrequentpattern.basic.hufim.hufim method)": [[51, "PAMI.highUtilityFrequentPattern.basic.HUFIM.HUFIM.mine"]], "printresults() (pami.highutilityfrequentpattern.basic.hufim.hufim method)": [[51, "PAMI.highUtilityFrequentPattern.basic.HUFIM.HUFIM.printResults"]], "save() (pami.highutilityfrequentpattern.basic.hufim.hufim method)": [[51, "PAMI.highUtilityFrequentPattern.basic.HUFIM.HUFIM.save"]], "startmine() (pami.highutilityfrequentpattern.basic.hufim.hufim method)": [[51, "PAMI.highUtilityFrequentPattern.basic.HUFIM.HUFIM.startMine"]], "pami.highutilitygeoreferencedfrequentpattern": [[52, "module-PAMI.highUtilityGeoreferencedFrequentPattern"]], "pami.highutilitygeoreferencedfrequentpattern.basic": [[53, "module-PAMI.highUtilityGeoreferencedFrequentPattern.basic"]], "pami.highutilitygeoreferencedfrequentpattern.basic.shufim": [[53, "module-PAMI.highUtilityGeoreferencedFrequentPattern.basic.SHUFIM"]], "pami.highutilitygeoreferencedfrequentpattern.basic.abstract": [[53, "module-PAMI.highUtilityGeoreferencedFrequentPattern.basic.abstract"]], "shufim (class in pami.highutilitygeoreferencedfrequentpattern.basic.shufim)": [[53, "PAMI.highUtilityGeoreferencedFrequentPattern.basic.SHUFIM.SHUFIM"]], "getmemoryrss() (pami.highutilitygeoreferencedfrequentpattern.basic.shufim.shufim method)": [[53, "PAMI.highUtilityGeoreferencedFrequentPattern.basic.SHUFIM.SHUFIM.getMemoryRSS"]], "getmemoryuss() (pami.highutilitygeoreferencedfrequentpattern.basic.shufim.shufim method)": [[53, 
"PAMI.highUtilityGeoreferencedFrequentPattern.basic.SHUFIM.SHUFIM.getMemoryUSS"]], "getpatterns() (pami.highutilitygeoreferencedfrequentpattern.basic.shufim.shufim method)": [[53, "PAMI.highUtilityGeoreferencedFrequentPattern.basic.SHUFIM.SHUFIM.getPatterns"]], "getpatternsasdataframe() (pami.highutilitygeoreferencedfrequentpattern.basic.shufim.shufim method)": [[53, "PAMI.highUtilityGeoreferencedFrequentPattern.basic.SHUFIM.SHUFIM.getPatternsAsDataFrame"]], "getruntime() (pami.highutilitygeoreferencedfrequentpattern.basic.shufim.shufim method)": [[53, "PAMI.highUtilityGeoreferencedFrequentPattern.basic.SHUFIM.SHUFIM.getRuntime"]], "main() (in module pami.highutilitygeoreferencedfrequentpattern.basic.shufim)": [[53, "PAMI.highUtilityGeoreferencedFrequentPattern.basic.SHUFIM.main"]], "mine() (pami.highutilitygeoreferencedfrequentpattern.basic.shufim.shufim method)": [[53, "PAMI.highUtilityGeoreferencedFrequentPattern.basic.SHUFIM.SHUFIM.mine"]], "printresults() (pami.highutilitygeoreferencedfrequentpattern.basic.shufim.shufim method)": [[53, "PAMI.highUtilityGeoreferencedFrequentPattern.basic.SHUFIM.SHUFIM.printResults"]], "save() (pami.highutilitygeoreferencedfrequentpattern.basic.shufim.shufim method)": [[53, "PAMI.highUtilityGeoreferencedFrequentPattern.basic.SHUFIM.SHUFIM.save"]], "startmine() (pami.highutilitygeoreferencedfrequentpattern.basic.shufim.shufim method)": [[53, "PAMI.highUtilityGeoreferencedFrequentPattern.basic.SHUFIM.SHUFIM.startMine"]], "pami.highutilitypattern": [[54, "module-PAMI.highUtilityPattern"]], "efim (class in pami.highutilitypattern.basic.efim)": [[55, "PAMI.highUtilityPattern.basic.EFIM.EFIM"]], "hminer (class in pami.highutilitypattern.basic.hminer)": [[55, "PAMI.highUtilityPattern.basic.HMiner.HMiner"]], "pami.highutilitypattern.basic": [[55, "module-PAMI.highUtilityPattern.basic"]], "pami.highutilitypattern.basic.efim": [[55, "module-PAMI.highUtilityPattern.basic.EFIM"]], "pami.highutilitypattern.basic.hminer": [[55, "module-PAMI.highUtilityPattern.basic.HMiner"]], "pami.highutilitypattern.basic.upgrowth": [[55, "module-PAMI.highUtilityPattern.basic.UPGrowth"]], "pami.highutilitypattern.basic.abstract": [[55, "module-PAMI.highUtilityPattern.basic.abstract"]], "printstats() (pami.highutilitypattern.basic.upgrowth.upgrowth method)": [[55, "PAMI.highUtilityPattern.basic.UPGrowth.UPGrowth.PrintStats"]], "upgrowth (class in pami.highutilitypattern.basic.upgrowth)": [[55, "PAMI.highUtilityPattern.basic.UPGrowth.UPGrowth"]], "getmemoryrss() (pami.highutilitypattern.basic.efim.efim method)": [[55, "PAMI.highUtilityPattern.basic.EFIM.EFIM.getMemoryRSS"]], "getmemoryrss() (pami.highutilitypattern.basic.hminer.hminer method)": [[55, "PAMI.highUtilityPattern.basic.HMiner.HMiner.getMemoryRSS"]], "getmemoryrss() (pami.highutilitypattern.basic.upgrowth.upgrowth method)": [[55, "PAMI.highUtilityPattern.basic.UPGrowth.UPGrowth.getMemoryRSS"]], "getmemoryuss() (pami.highutilitypattern.basic.efim.efim method)": [[55, "PAMI.highUtilityPattern.basic.EFIM.EFIM.getMemoryUSS"]], "getmemoryuss() (pami.highutilitypattern.basic.hminer.hminer method)": [[55, "PAMI.highUtilityPattern.basic.HMiner.HMiner.getMemoryUSS"]], "getmemoryuss() (pami.highutilitypattern.basic.upgrowth.upgrowth method)": [[55, "PAMI.highUtilityPattern.basic.UPGrowth.UPGrowth.getMemoryUSS"]], "getpatterns() (pami.highutilitypattern.basic.efim.efim method)": [[55, "PAMI.highUtilityPattern.basic.EFIM.EFIM.getPatterns"]], "getpatterns() (pami.highutilitypattern.basic.hminer.hminer method)": [[55, 
"PAMI.highUtilityPattern.basic.HMiner.HMiner.getPatterns"]], "getpatterns() (pami.highutilitypattern.basic.upgrowth.upgrowth method)": [[55, "PAMI.highUtilityPattern.basic.UPGrowth.UPGrowth.getPatterns"]], "getpatternsasdataframe() (pami.highutilitypattern.basic.efim.efim method)": [[55, "PAMI.highUtilityPattern.basic.EFIM.EFIM.getPatternsAsDataFrame"]], "getpatternsasdataframe() (pami.highutilitypattern.basic.hminer.hminer method)": [[55, "PAMI.highUtilityPattern.basic.HMiner.HMiner.getPatternsAsDataFrame"]], "getpatternsasdataframe() (pami.highutilitypattern.basic.upgrowth.upgrowth method)": [[55, "PAMI.highUtilityPattern.basic.UPGrowth.UPGrowth.getPatternsAsDataFrame"]], "getruntime() (pami.highutilitypattern.basic.efim.efim method)": [[55, "PAMI.highUtilityPattern.basic.EFIM.EFIM.getRuntime"]], "getruntime() (pami.highutilitypattern.basic.hminer.hminer method)": [[55, "PAMI.highUtilityPattern.basic.HMiner.HMiner.getRuntime"]], "getruntime() (pami.highutilitypattern.basic.upgrowth.upgrowth method)": [[55, "PAMI.highUtilityPattern.basic.UPGrowth.UPGrowth.getRuntime"]], "mine() (pami.highutilitypattern.basic.efim.efim method)": [[55, "PAMI.highUtilityPattern.basic.EFIM.EFIM.mine"]], "mine() (pami.highutilitypattern.basic.hminer.hminer method)": [[55, "PAMI.highUtilityPattern.basic.HMiner.HMiner.mine"]], "mine() (pami.highutilitypattern.basic.upgrowth.upgrowth method)": [[55, "PAMI.highUtilityPattern.basic.UPGrowth.UPGrowth.mine"]], "printresults() (pami.highutilitypattern.basic.efim.efim method)": [[55, "PAMI.highUtilityPattern.basic.EFIM.EFIM.printResults"]], "printresults() (pami.highutilitypattern.basic.hminer.hminer method)": [[55, "PAMI.highUtilityPattern.basic.HMiner.HMiner.printResults"]], "printresults() (pami.highutilitypattern.basic.upgrowth.upgrowth method)": [[55, "PAMI.highUtilityPattern.basic.UPGrowth.UPGrowth.printResults"]], "save() (pami.highutilitypattern.basic.efim.efim method)": [[55, "PAMI.highUtilityPattern.basic.EFIM.EFIM.save"]], "save() (pami.highutilitypattern.basic.hminer.hminer method)": [[55, "PAMI.highUtilityPattern.basic.HMiner.HMiner.save"]], "save() (pami.highutilitypattern.basic.upgrowth.upgrowth method)": [[55, "PAMI.highUtilityPattern.basic.UPGrowth.UPGrowth.save"]], "sort_transaction() (pami.highutilitypattern.basic.efim.efim method)": [[55, "PAMI.highUtilityPattern.basic.EFIM.EFIM.sort_transaction"]], "startmine() (pami.highutilitypattern.basic.efim.efim method)": [[55, "PAMI.highUtilityPattern.basic.EFIM.EFIM.startMine"]], "startmine() (pami.highutilitypattern.basic.hminer.hminer method)": [[55, "PAMI.highUtilityPattern.basic.HMiner.HMiner.startMine"]], "startmine() (pami.highutilitypattern.basic.upgrowth.upgrowth method)": [[55, "PAMI.highUtilityPattern.basic.UPGrowth.UPGrowth.startMine"]], "pami.highutilitypattern.parallel": [[56, "module-PAMI.highUtilityPattern.parallel"]], "pami.highutilitypattern.parallel.abstract": [[56, "module-PAMI.highUtilityPattern.parallel.abstract"]], "pami.highutilitypatternsinstreams": [[57, "module-PAMI.highUtilityPatternsInStreams"]], "pami.highutilitypatternsinstreams.abstract": [[57, "module-PAMI.highUtilityPatternsInStreams.abstract"]], "pami.highutilityspatialpattern": [[58, "module-PAMI.highUtilitySpatialPattern"]], "pami.highutilityspatialpattern.abstract": [[58, "module-PAMI.highUtilitySpatialPattern.abstract"]], "endtime() (pami.highutilityspatialpattern.abstract.utilitypatterns method)": [[58, "PAMI.highUtilitySpatialPattern.abstract.utilityPatterns.endTime"]], "finalpatterns() 
(pami.highutilityspatialpattern.abstract.utilitypatterns method)": [[58, "PAMI.highUtilitySpatialPattern.abstract.utilityPatterns.finalPatterns"]], "getmemoryrss() (pami.highutilityspatialpattern.abstract.utilitypatterns method)": [[58, "PAMI.highUtilitySpatialPattern.abstract.utilityPatterns.getMemoryRSS"]], "getmemoryuss() (pami.highutilityspatialpattern.abstract.utilitypatterns method)": [[58, "PAMI.highUtilitySpatialPattern.abstract.utilityPatterns.getMemoryUSS"]], "getpatterns() (pami.highutilityspatialpattern.abstract.utilitypatterns method)": [[58, "PAMI.highUtilitySpatialPattern.abstract.utilityPatterns.getPatterns"]], "getpatternsasdataframe() (pami.highutilityspatialpattern.abstract.utilitypatterns method)": [[58, "PAMI.highUtilitySpatialPattern.abstract.utilityPatterns.getPatternsAsDataFrame"]], "getruntime() (pami.highutilityspatialpattern.abstract.utilitypatterns method)": [[58, "PAMI.highUtilitySpatialPattern.abstract.utilityPatterns.getRuntime"]], "ifile() (pami.highutilityspatialpattern.abstract.utilitypatterns method)": [[58, "PAMI.highUtilitySpatialPattern.abstract.utilityPatterns.iFile"]], "memoryrss() (pami.highutilityspatialpattern.abstract.utilitypatterns method)": [[58, "PAMI.highUtilitySpatialPattern.abstract.utilityPatterns.memoryRSS"]], "memoryuss() (pami.highutilityspatialpattern.abstract.utilitypatterns method)": [[58, "PAMI.highUtilitySpatialPattern.abstract.utilityPatterns.memoryUSS"]], "minutil() (pami.highutilityspatialpattern.abstract.utilitypatterns method)": [[58, "PAMI.highUtilitySpatialPattern.abstract.utilityPatterns.minUtil"]], "nfile() (pami.highutilityspatialpattern.abstract.utilitypatterns method)": [[58, "PAMI.highUtilitySpatialPattern.abstract.utilityPatterns.nFile"]], "ofile() (pami.highutilityspatialpattern.abstract.utilitypatterns method)": [[58, "PAMI.highUtilitySpatialPattern.abstract.utilityPatterns.oFile"]], "save() (pami.highutilityspatialpattern.abstract.utilitypatterns method)": [[58, "PAMI.highUtilitySpatialPattern.abstract.utilityPatterns.save"]], "startmine() (pami.highutilityspatialpattern.abstract.utilitypatterns method)": [[58, "PAMI.highUtilitySpatialPattern.abstract.utilityPatterns.startMine"]], "starttime() (pami.highutilityspatialpattern.abstract.utilitypatterns method)": [[58, "PAMI.highUtilitySpatialPattern.abstract.utilityPatterns.startTime"]], "utilitypatterns (class in pami.highutilityspatialpattern.abstract)": [[58, "PAMI.highUtilitySpatialPattern.abstract.utilityPatterns"]], "hdshuim (class in pami.highutilityspatialpattern.basic.hdshuim)": [[59, "PAMI.highUtilitySpatialPattern.basic.HDSHUIM.HDSHUIM"]], "pami.highutilityspatialpattern.basic": [[59, "module-PAMI.highUtilitySpatialPattern.basic"]], "pami.highutilityspatialpattern.basic.hdshuim": [[59, "module-PAMI.highUtilitySpatialPattern.basic.HDSHUIM"]], "pami.highutilityspatialpattern.basic.shuim": [[59, "module-PAMI.highUtilitySpatialPattern.basic.SHUIM"]], "pami.highutilityspatialpattern.basic.abstract": [[59, "module-PAMI.highUtilitySpatialPattern.basic.abstract"]], "shuim (class in pami.highutilityspatialpattern.basic.shuim)": [[59, "PAMI.highUtilitySpatialPattern.basic.SHUIM.SHUIM"]], "getmemoryrss() (pami.highutilityspatialpattern.basic.hdshuim.hdshuim method)": [[59, "PAMI.highUtilitySpatialPattern.basic.HDSHUIM.HDSHUIM.getMemoryRSS"]], "getmemoryrss() (pami.highutilityspatialpattern.basic.shuim.shuim method)": [[59, "PAMI.highUtilitySpatialPattern.basic.SHUIM.SHUIM.getMemoryRSS"]], "getmemoryuss() (pami.highutilityspatialpattern.basic.hdshuim.hdshuim method)": 
[[59, "PAMI.highUtilitySpatialPattern.basic.HDSHUIM.HDSHUIM.getMemoryUSS"]], "getmemoryuss() (pami.highutilityspatialpattern.basic.shuim.shuim method)": [[59, "PAMI.highUtilitySpatialPattern.basic.SHUIM.SHUIM.getMemoryUSS"]], "getpatterns() (pami.highutilityspatialpattern.basic.hdshuim.hdshuim method)": [[59, "PAMI.highUtilitySpatialPattern.basic.HDSHUIM.HDSHUIM.getPatterns"]], "getpatterns() (pami.highutilityspatialpattern.basic.shuim.shuim method)": [[59, "PAMI.highUtilitySpatialPattern.basic.SHUIM.SHUIM.getPatterns"]], "getpatternsasdataframe() (pami.highutilityspatialpattern.basic.hdshuim.hdshuim method)": [[59, "PAMI.highUtilitySpatialPattern.basic.HDSHUIM.HDSHUIM.getPatternsAsDataFrame"]], "getpatternsasdataframe() (pami.highutilityspatialpattern.basic.shuim.shuim method)": [[59, "PAMI.highUtilitySpatialPattern.basic.SHUIM.SHUIM.getPatternsAsDataFrame"]], "getruntime() (pami.highutilityspatialpattern.basic.hdshuim.hdshuim method)": [[59, "PAMI.highUtilitySpatialPattern.basic.HDSHUIM.HDSHUIM.getRuntime"]], "getruntime() (pami.highutilityspatialpattern.basic.shuim.shuim method)": [[59, "PAMI.highUtilitySpatialPattern.basic.SHUIM.SHUIM.getRuntime"]], "mine() (pami.highutilityspatialpattern.basic.hdshuim.hdshuim method)": [[59, "PAMI.highUtilitySpatialPattern.basic.HDSHUIM.HDSHUIM.mine"]], "mine() (pami.highutilityspatialpattern.basic.shuim.shuim method)": [[59, "PAMI.highUtilitySpatialPattern.basic.SHUIM.SHUIM.mine"]], "printresults() (pami.highutilityspatialpattern.basic.hdshuim.hdshuim method)": [[59, "PAMI.highUtilitySpatialPattern.basic.HDSHUIM.HDSHUIM.printResults"]], "printresults() (pami.highutilityspatialpattern.basic.shuim.shuim method)": [[59, "PAMI.highUtilitySpatialPattern.basic.SHUIM.SHUIM.printResults"]], "save() (pami.highutilityspatialpattern.basic.hdshuim.hdshuim method)": [[59, "PAMI.highUtilitySpatialPattern.basic.HDSHUIM.HDSHUIM.save"]], "save() (pami.highutilityspatialpattern.basic.shuim.shuim method)": [[59, "PAMI.highUtilitySpatialPattern.basic.SHUIM.SHUIM.save"]], "startmine() (pami.highutilityspatialpattern.basic.hdshuim.hdshuim method)": [[59, "PAMI.highUtilitySpatialPattern.basic.HDSHUIM.HDSHUIM.startMine"]], "startmine() (pami.highutilityspatialpattern.basic.shuim.shuim method)": [[59, "PAMI.highUtilitySpatialPattern.basic.SHUIM.SHUIM.startMine"]], "dataset (class in pami.highutilityspatialpattern.topk.tkshuim)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.Dataset"]], "neighbours (pami.highutilityspatialpattern.topk.tkshuim.tkshuim attribute)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.Neighbours"]], "pami.highutilityspatialpattern.topk": [[60, "module-PAMI.highUtilitySpatialPattern.topk"]], "pami.highutilityspatialpattern.topk.tkshuim": [[60, "module-PAMI.highUtilitySpatialPattern.topk.TKSHUIM"]], "pami.highutilityspatialpattern.topk.abstract": [[60, "module-PAMI.highUtilitySpatialPattern.topk.abstract"]], "tkshuim (class in pami.highutilityspatialpattern.topk.tkshuim)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM"]], "transaction (class in pami.highutilityspatialpattern.topk.tkshuim)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.Transaction"]], "additemset() (pami.highutilityspatialpattern.topk.tkshuim.tkshuim method)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.additemset"]], "backtrackingefim() (pami.highutilityspatialpattern.topk.tkshuim.tkshuim method)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.backtrackingEFIM"]], "calculateneighbourintersection() 
(pami.highutilityspatialpattern.topk.tkshuim.tkshuim method)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.calculateNeighbourIntersection"]], "candidatecount (pami.highutilityspatialpattern.topk.tkshuim.tkshuim attribute)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.candidateCount"]], "createtransaction() (pami.highutilityspatialpattern.topk.tkshuim.dataset method)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.Dataset.createTransaction"]], "endtime (pami.highutilityspatialpattern.topk.tkshuim.tkshuim attribute)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.endTime"]], "endtime() (pami.highutilityspatialpattern.topk.abstract.utilitypatterns method)": [[60, "PAMI.highUtilitySpatialPattern.topk.abstract.utilityPatterns.endTime"]], "finalpatterns (pami.highutilityspatialpattern.topk.tkshuim.tkshuim attribute)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.finalPatterns"]], "finalpatterns() (pami.highutilityspatialpattern.topk.abstract.utilitypatterns method)": [[60, "PAMI.highUtilitySpatialPattern.topk.abstract.utilityPatterns.finalPatterns"]], "getitems() (pami.highutilityspatialpattern.topk.tkshuim.transaction method)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.Transaction.getItems"]], "getlastposition() (pami.highutilityspatialpattern.topk.tkshuim.transaction method)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.Transaction.getLastPosition"]], "getmaxitem() (pami.highutilityspatialpattern.topk.tkshuim.dataset method)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.Dataset.getMaxItem"]], "getmemoryrss() (pami.highutilityspatialpattern.topk.tkshuim.tkshuim method)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.getMemoryRSS"]], "getmemoryrss() (pami.highutilityspatialpattern.topk.abstract.utilitypatterns method)": [[60, "PAMI.highUtilitySpatialPattern.topk.abstract.utilityPatterns.getMemoryRSS"]], "getmemoryuss() (pami.highutilityspatialpattern.topk.tkshuim.tkshuim method)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.getMemoryUSS"]], "getmemoryuss() (pami.highutilityspatialpattern.topk.abstract.utilitypatterns method)": [[60, "PAMI.highUtilitySpatialPattern.topk.abstract.utilityPatterns.getMemoryUSS"]], "getpatterns() (pami.highutilityspatialpattern.topk.tkshuim.tkshuim method)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.getPatterns"]], "getpatterns() (pami.highutilityspatialpattern.topk.abstract.utilitypatterns method)": [[60, "PAMI.highUtilitySpatialPattern.topk.abstract.utilityPatterns.getPatterns"]], "getpatternsasdataframe() (pami.highutilityspatialpattern.topk.tkshuim.tkshuim method)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.getPatternsAsDataFrame"]], "getpatternsasdataframe() (pami.highutilityspatialpattern.topk.abstract.utilitypatterns method)": [[60, "PAMI.highUtilitySpatialPattern.topk.abstract.utilityPatterns.getPatternsAsDataFrame"]], "getpmus() (pami.highutilityspatialpattern.topk.tkshuim.transaction method)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.Transaction.getPmus"]], "getruntime() (pami.highutilityspatialpattern.topk.tkshuim.tkshuim method)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.getRuntime"]], "getruntime() (pami.highutilityspatialpattern.topk.abstract.utilitypatterns method)": [[60, "PAMI.highUtilitySpatialPattern.topk.abstract.utilityPatterns.getRuntime"]], "gettransactions() (pami.highutilityspatialpattern.topk.tkshuim.dataset method)": [[60, 
"PAMI.highUtilitySpatialPattern.topk.TKSHUIM.Dataset.getTransactions"]], "getutilities() (pami.highutilityspatialpattern.topk.tkshuim.transaction method)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.Transaction.getUtilities"]], "heaplist (pami.highutilityspatialpattern.topk.tkshuim.tkshuim attribute)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.heapList"]], "ifile (pami.highutilityspatialpattern.topk.tkshuim.tkshuim attribute)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.iFile"]], "ifile() (pami.highutilityspatialpattern.topk.abstract.utilitypatterns method)": [[60, "PAMI.highUtilitySpatialPattern.topk.abstract.utilityPatterns.iFile"]], "insertionsort() (pami.highutilityspatialpattern.topk.tkshuim.transaction method)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.Transaction.insertionSort"]], "inttostr (pami.highutilityspatialpattern.topk.tkshuim.tkshuim attribute)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.intTostr"]], "intersection() (pami.highutilityspatialpattern.topk.tkshuim.tkshuim method)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.intersection"]], "is_equal() (pami.highutilityspatialpattern.topk.tkshuim.tkshuim method)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.is_equal"]], "main() (in module pami.highutilityspatialpattern.topk.tkshuim)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.main"]], "maxitem (pami.highutilityspatialpattern.topk.tkshuim.dataset attribute)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.Dataset.maxItem"]], "maxmemory (pami.highutilityspatialpattern.topk.tkshuim.tkshuim attribute)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.maxMemory"]], "memoryrss (pami.highutilityspatialpattern.topk.tkshuim.tkshuim attribute)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.memoryRSS"]], "memoryrss() (pami.highutilityspatialpattern.topk.abstract.utilitypatterns method)": [[60, "PAMI.highUtilitySpatialPattern.topk.abstract.utilityPatterns.memoryRSS"]], "memoryuss (pami.highutilityspatialpattern.topk.tkshuim.tkshuim attribute)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.memoryUSS"]], "memoryuss() (pami.highutilityspatialpattern.topk.abstract.utilitypatterns method)": [[60, "PAMI.highUtilitySpatialPattern.topk.abstract.utilityPatterns.memoryUSS"]], "minutil (pami.highutilityspatialpattern.topk.tkshuim.tkshuim attribute)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.minUtil"]], "mine() (pami.highutilityspatialpattern.topk.tkshuim.tkshuim method)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.mine"]], "nfile (pami.highutilityspatialpattern.topk.tkshuim.tkshuim attribute)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.nFile"]], "nfile() (pami.highutilityspatialpattern.topk.abstract.utilitypatterns method)": [[60, "PAMI.highUtilitySpatialPattern.topk.abstract.utilityPatterns.nFile"]], "newnamestooldnames (pami.highutilityspatialpattern.topk.tkshuim.tkshuim attribute)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.newNamesToOldNames"]], "ofile (pami.highutilityspatialpattern.topk.tkshuim.tkshuim attribute)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.oFile"]], "ofile() (pami.highutilityspatialpattern.topk.abstract.utilitypatterns method)": [[60, "PAMI.highUtilitySpatialPattern.topk.abstract.utilityPatterns.oFile"]], "offset (pami.highutilityspatialpattern.topk.tkshuim.transaction attribute)": [[60, 
"PAMI.highUtilitySpatialPattern.topk.TKSHUIM.Transaction.offset"]], "oldnamestonewnames (pami.highutilityspatialpattern.topk.tkshuim.tkshuim attribute)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.oldNamesToNewNames"]], "output() (pami.highutilityspatialpattern.topk.tkshuim.tkshuim method)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.output"]], "prefixutility (pami.highutilityspatialpattern.topk.tkshuim.transaction attribute)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.Transaction.prefixUtility"]], "printresults() (pami.highutilityspatialpattern.topk.tkshuim.tkshuim method)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.printResults"]], "printresults() (pami.highutilityspatialpattern.topk.abstract.utilitypatterns method)": [[60, "PAMI.highUtilitySpatialPattern.topk.abstract.utilityPatterns.printResults"]], "projecttransaction() (pami.highutilityspatialpattern.topk.tkshuim.transaction method)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.Transaction.projectTransaction"]], "removeunpromisingitems() (pami.highutilityspatialpattern.topk.tkshuim.transaction method)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.Transaction.removeUnpromisingItems"]], "save() (pami.highutilityspatialpattern.topk.tkshuim.tkshuim method)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.save"]], "save() (pami.highutilityspatialpattern.topk.abstract.utilitypatterns method)": [[60, "PAMI.highUtilitySpatialPattern.topk.abstract.utilityPatterns.save"]], "sep (pami.highutilityspatialpattern.topk.tkshuim.tkshuim attribute)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.sep"]], "sortdatabase() (pami.highutilityspatialpattern.topk.tkshuim.tkshuim method)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.sortDatabase"]], "sort_transaction() (pami.highutilityspatialpattern.topk.tkshuim.tkshuim method)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.sort_transaction"]], "startmine() (pami.highutilityspatialpattern.topk.tkshuim.tkshuim method)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.startMine"]], "startmine() (pami.highutilityspatialpattern.topk.abstract.utilitypatterns method)": [[60, "PAMI.highUtilitySpatialPattern.topk.abstract.utilityPatterns.startMine"]], "starttime (pami.highutilityspatialpattern.topk.tkshuim.tkshuim attribute)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.startTime"]], "starttime() (pami.highutilityspatialpattern.topk.abstract.utilitypatterns method)": [[60, "PAMI.highUtilitySpatialPattern.topk.abstract.utilityPatterns.startTime"]], "strtoint (pami.highutilityspatialpattern.topk.tkshuim.tkshuim attribute)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.strToint"]], "temp (pami.highutilityspatialpattern.topk.tkshuim.tkshuim attribute)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.temp"]], "transactions (pami.highutilityspatialpattern.topk.tkshuim.dataset attribute)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.Dataset.transactions"]], "useutilitybinarraytocalculatelocalutilityfirsttime() (pami.highutilityspatialpattern.topk.tkshuim.tkshuim method)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.useUtilityBinArrayToCalculateLocalUtilityFirstTime"]], "useutilitybinarraytocalculatesubtreeutilityfirsttime() (pami.highutilityspatialpattern.topk.tkshuim.tkshuim method)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.useUtilityBinArrayToCalculateSubtreeUtilityFirstTime"]], 
"useutilitybinarraystocalculateupperbounds() (pami.highutilityspatialpattern.topk.tkshuim.tkshuim method)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.useUtilityBinArraysToCalculateUpperBounds"]], "utilitybinarraylu (pami.highutilityspatialpattern.topk.tkshuim.tkshuim attribute)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.utilityBinArrayLU"]], "utilitybinarraysu (pami.highutilityspatialpattern.topk.tkshuim.tkshuim attribute)": [[60, "PAMI.highUtilitySpatialPattern.topk.TKSHUIM.TKSHUIM.utilityBinArraySU"]], "utilitypatterns (class in pami.highutilityspatialpattern.topk.abstract)": [[60, "PAMI.highUtilitySpatialPattern.topk.abstract.utilityPatterns"]], "pami.localperiodicpattern": [[61, "module-PAMI.localPeriodicPattern"]], "lppgrowth (class in pami.localperiodicpattern.basic.lppgrowth)": [[62, "PAMI.localPeriodicPattern.basic.LPPGrowth.LPPGrowth"]], "lppmbreadth (class in pami.localperiodicpattern.basic.lppmbreadth)": [[62, "PAMI.localPeriodicPattern.basic.LPPMBreadth.LPPMBreadth"]], "lppmdepth (class in pami.localperiodicpattern.basic.lppmdepth)": [[62, "PAMI.localPeriodicPattern.basic.LPPMDepth.LPPMDepth"]], "node (class in pami.localperiodicpattern.basic.lppgrowth)": [[62, "PAMI.localPeriodicPattern.basic.LPPGrowth.Node"]], "pami.localperiodicpattern.basic": [[62, "module-PAMI.localPeriodicPattern.basic"]], "pami.localperiodicpattern.basic.lppgrowth": [[62, "module-PAMI.localPeriodicPattern.basic.LPPGrowth"]], "pami.localperiodicpattern.basic.lppmbreadth": [[62, "module-PAMI.localPeriodicPattern.basic.LPPMBreadth"]], "pami.localperiodicpattern.basic.lppmdepth": [[62, "module-PAMI.localPeriodicPattern.basic.LPPMDepth"]], "pami.localperiodicpattern.basic.abstract": [[62, "module-PAMI.localPeriodicPattern.basic.abstract"]], "tree (class in pami.localperiodicpattern.basic.lppgrowth)": [[62, "PAMI.localPeriodicPattern.basic.LPPGrowth.Tree"]], "addtransaction() (pami.localperiodicpattern.basic.lppgrowth.tree method)": [[62, "PAMI.localPeriodicPattern.basic.LPPGrowth.Tree.addTransaction"]], "createprefixtree() (pami.localperiodicpattern.basic.lppgrowth.tree method)": [[62, "PAMI.localPeriodicPattern.basic.LPPGrowth.Tree.createPrefixTree"]], "deletenode() (pami.localperiodicpattern.basic.lppgrowth.tree method)": [[62, "PAMI.localPeriodicPattern.basic.LPPGrowth.Tree.deleteNode"]], "fixnodelinks() (pami.localperiodicpattern.basic.lppgrowth.tree method)": [[62, "PAMI.localPeriodicPattern.basic.LPPGrowth.Tree.fixNodeLinks"]], "getchild() (pami.localperiodicpattern.basic.lppgrowth.node method)": [[62, "PAMI.localPeriodicPattern.basic.LPPGrowth.Node.getChild"]], "getmemoryrss() (pami.localperiodicpattern.basic.lppgrowth.lppgrowth method)": [[62, "PAMI.localPeriodicPattern.basic.LPPGrowth.LPPGrowth.getMemoryRSS"]], "getmemoryrss() (pami.localperiodicpattern.basic.lppmbreadth.lppmbreadth method)": [[62, "PAMI.localPeriodicPattern.basic.LPPMBreadth.LPPMBreadth.getMemoryRSS"]], "getmemoryrss() (pami.localperiodicpattern.basic.lppmdepth.lppmdepth method)": [[62, "PAMI.localPeriodicPattern.basic.LPPMDepth.LPPMDepth.getMemoryRSS"]], "getmemoryuss() (pami.localperiodicpattern.basic.lppgrowth.lppgrowth method)": [[62, "PAMI.localPeriodicPattern.basic.LPPGrowth.LPPGrowth.getMemoryUSS"]], "getmemoryuss() (pami.localperiodicpattern.basic.lppmbreadth.lppmbreadth method)": [[62, "PAMI.localPeriodicPattern.basic.LPPMBreadth.LPPMBreadth.getMemoryUSS"]], "getmemoryuss() (pami.localperiodicpattern.basic.lppmdepth.lppmdepth method)": [[62, 
"PAMI.localPeriodicPattern.basic.LPPMDepth.LPPMDepth.getMemoryUSS"]], "getpatterns() (pami.localperiodicpattern.basic.lppgrowth.lppgrowth method)": [[62, "PAMI.localPeriodicPattern.basic.LPPGrowth.LPPGrowth.getPatterns"]], "getpatterns() (pami.localperiodicpattern.basic.lppmbreadth.lppmbreadth method)": [[62, "PAMI.localPeriodicPattern.basic.LPPMBreadth.LPPMBreadth.getPatterns"]], "getpatterns() (pami.localperiodicpattern.basic.lppmdepth.lppmdepth method)": [[62, "PAMI.localPeriodicPattern.basic.LPPMDepth.LPPMDepth.getPatterns"]], "getpatternsasdataframe() (pami.localperiodicpattern.basic.lppgrowth.lppgrowth method)": [[62, "PAMI.localPeriodicPattern.basic.LPPGrowth.LPPGrowth.getPatternsAsDataFrame"]], "getpatternsasdataframe() (pami.localperiodicpattern.basic.lppmbreadth.lppmbreadth method)": [[62, "PAMI.localPeriodicPattern.basic.LPPMBreadth.LPPMBreadth.getPatternsAsDataFrame"]], "getpatternsasdataframe() (pami.localperiodicpattern.basic.lppmdepth.lppmdepth method)": [[62, "PAMI.localPeriodicPattern.basic.LPPMDepth.LPPMDepth.getPatternsAsDataFrame"]], "getruntime() (pami.localperiodicpattern.basic.lppgrowth.lppgrowth method)": [[62, "PAMI.localPeriodicPattern.basic.LPPGrowth.LPPGrowth.getRuntime"]], "getruntime() (pami.localperiodicpattern.basic.lppmbreadth.lppmbreadth method)": [[62, "PAMI.localPeriodicPattern.basic.LPPMBreadth.LPPMBreadth.getRuntime"]], "getruntime() (pami.localperiodicpattern.basic.lppmdepth.lppmdepth method)": [[62, "PAMI.localPeriodicPattern.basic.LPPMDepth.LPPMDepth.getRuntime"]], "mine() (pami.localperiodicpattern.basic.lppgrowth.lppgrowth method)": [[62, "PAMI.localPeriodicPattern.basic.LPPGrowth.LPPGrowth.mine"]], "mine() (pami.localperiodicpattern.basic.lppmbreadth.lppmbreadth method)": [[62, "PAMI.localPeriodicPattern.basic.LPPMBreadth.LPPMBreadth.mine"]], "mine() (pami.localperiodicpattern.basic.lppmdepth.lppmdepth method)": [[62, "PAMI.localPeriodicPattern.basic.LPPMDepth.LPPMDepth.mine"]], "printresults() (pami.localperiodicpattern.basic.lppgrowth.lppgrowth method)": [[62, "PAMI.localPeriodicPattern.basic.LPPGrowth.LPPGrowth.printResults"]], "printresults() (pami.localperiodicpattern.basic.lppmbreadth.lppmbreadth method)": [[62, "PAMI.localPeriodicPattern.basic.LPPMBreadth.LPPMBreadth.printResults"]], "printresults() (pami.localperiodicpattern.basic.lppmdepth.lppmdepth method)": [[62, "PAMI.localPeriodicPattern.basic.LPPMDepth.LPPMDepth.printResults"]], "save() (pami.localperiodicpattern.basic.lppgrowth.lppgrowth method)": [[62, "PAMI.localPeriodicPattern.basic.LPPGrowth.LPPGrowth.save"]], "save() (pami.localperiodicpattern.basic.lppmbreadth.lppmbreadth method)": [[62, "PAMI.localPeriodicPattern.basic.LPPMBreadth.LPPMBreadth.save"]], "save() (pami.localperiodicpattern.basic.lppmdepth.lppmdepth method)": [[62, "PAMI.localPeriodicPattern.basic.LPPMDepth.LPPMDepth.save"]], "startmine() (pami.localperiodicpattern.basic.lppgrowth.lppgrowth method)": [[62, "PAMI.localPeriodicPattern.basic.LPPGrowth.LPPGrowth.startMine"]], "startmine() (pami.localperiodicpattern.basic.lppmbreadth.lppmbreadth method)": [[62, "PAMI.localPeriodicPattern.basic.LPPMBreadth.LPPMBreadth.startMine"]], "startmine() (pami.localperiodicpattern.basic.lppmdepth.lppmdepth method)": [[62, "PAMI.localPeriodicPattern.basic.LPPMDepth.LPPMDepth.startMine"]], "pami.multipleminimumsupportbasedfrequentpattern": [[63, "module-PAMI.multipleMinimumSupportBasedFrequentPattern"]], "cfpgrowth (class in pami.multipleminimumsupportbasedfrequentpattern.basic.cfpgrowth)": [[64, 
"PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowth.CFPGrowth"]], "cfpgrowthplus (class in pami.multipleminimumsupportbasedfrequentpattern.basic.cfpgrowthplus)": [[64, "PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowthPlus.CFPGrowthPlus"]], "mine() (pami.multipleminimumsupportbasedfrequentpattern.basic.cfpgrowth.cfpgrowth method)": [[64, "PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowth.CFPGrowth.Mine"]], "mine() (pami.multipleminimumsupportbasedfrequentpattern.basic.cfpgrowthplus.cfpgrowthplus method)": [[64, "PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowthPlus.CFPGrowthPlus.Mine"]], "pami.multipleminimumsupportbasedfrequentpattern.basic": [[64, "module-PAMI.multipleMinimumSupportBasedFrequentPattern.basic"]], "pami.multipleminimumsupportbasedfrequentpattern.basic.cfpgrowth": [[64, "module-PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowth"]], "pami.multipleminimumsupportbasedfrequentpattern.basic.cfpgrowthplus": [[64, "module-PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowthPlus"]], "pami.multipleminimumsupportbasedfrequentpattern.basic.abstract": [[64, "module-PAMI.multipleMinimumSupportBasedFrequentPattern.basic.abstract"]], "getmemoryrss() (pami.multipleminimumsupportbasedfrequentpattern.basic.cfpgrowth.cfpgrowth method)": [[64, "PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowth.CFPGrowth.getMemoryRSS"]], "getmemoryrss() (pami.multipleminimumsupportbasedfrequentpattern.basic.cfpgrowthplus.cfpgrowthplus method)": [[64, "PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowthPlus.CFPGrowthPlus.getMemoryRSS"]], "getmemoryuss() (pami.multipleminimumsupportbasedfrequentpattern.basic.cfpgrowth.cfpgrowth method)": [[64, "PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowth.CFPGrowth.getMemoryUSS"]], "getmemoryuss() (pami.multipleminimumsupportbasedfrequentpattern.basic.cfpgrowthplus.cfpgrowthplus method)": [[64, "PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowthPlus.CFPGrowthPlus.getMemoryUSS"]], "getpatterns() (pami.multipleminimumsupportbasedfrequentpattern.basic.cfpgrowth.cfpgrowth method)": [[64, "PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowth.CFPGrowth.getPatterns"]], "getpatterns() (pami.multipleminimumsupportbasedfrequentpattern.basic.cfpgrowthplus.cfpgrowthplus method)": [[64, "PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowthPlus.CFPGrowthPlus.getPatterns"]], "getpatternsasdataframe() (pami.multipleminimumsupportbasedfrequentpattern.basic.cfpgrowth.cfpgrowth method)": [[64, "PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowth.CFPGrowth.getPatternsAsDataFrame"]], "getpatternsasdataframe() (pami.multipleminimumsupportbasedfrequentpattern.basic.cfpgrowthplus.cfpgrowthplus method)": [[64, "PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowthPlus.CFPGrowthPlus.getPatternsAsDataFrame"]], "getruntime() (pami.multipleminimumsupportbasedfrequentpattern.basic.cfpgrowth.cfpgrowth method)": [[64, "PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowth.CFPGrowth.getRuntime"]], "getruntime() (pami.multipleminimumsupportbasedfrequentpattern.basic.cfpgrowthplus.cfpgrowthplus method)": [[64, "PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowthPlus.CFPGrowthPlus.getRuntime"]], "printresults() (pami.multipleminimumsupportbasedfrequentpattern.basic.cfpgrowth.cfpgrowth method)": [[64, "PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowth.CFPGrowth.printResults"]], "printresults() 
(pami.multipleminimumsupportbasedfrequentpattern.basic.cfpgrowthplus.cfpgrowthplus method)": [[64, "PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowthPlus.CFPGrowthPlus.printResults"]], "save() (pami.multipleminimumsupportbasedfrequentpattern.basic.cfpgrowth.cfpgrowth method)": [[64, "PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowth.CFPGrowth.save"]], "save() (pami.multipleminimumsupportbasedfrequentpattern.basic.cfpgrowthplus.cfpgrowthplus method)": [[64, "PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowthPlus.CFPGrowthPlus.save"]], "startmine() (pami.multipleminimumsupportbasedfrequentpattern.basic.cfpgrowth.cfpgrowth method)": [[64, "PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowth.CFPGrowth.startMine"]], "startmine() (pami.multipleminimumsupportbasedfrequentpattern.basic.cfpgrowthplus.cfpgrowthplus method)": [[64, "PAMI.multipleMinimumSupportBasedFrequentPattern.basic.CFPGrowthPlus.CFPGrowthPlus.startMine"]], "pami.partialperiodicfrequentpattern": [[65, "module-PAMI.partialPeriodicFrequentPattern"]], "pami.partialperiodicfrequentpattern.basic": [[66, "module-PAMI.partialPeriodicFrequentPattern.basic"]], "pami.partialperiodicfrequentpattern.basic.abstract": [[66, "module-PAMI.partialPeriodicFrequentPattern.basic.abstract"]], "getmemoryrss() (pami.partialperiodicfrequentpattern.basic.abstract.partialperiodicpatterns method)": [[66, "PAMI.partialPeriodicFrequentPattern.basic.abstract.partialPeriodicPatterns.getMemoryRSS"]], "getmemoryuss() (pami.partialperiodicfrequentpattern.basic.abstract.partialperiodicpatterns method)": [[66, "PAMI.partialPeriodicFrequentPattern.basic.abstract.partialPeriodicPatterns.getMemoryUSS"]], "getpatterns() (pami.partialperiodicfrequentpattern.basic.abstract.partialperiodicpatterns method)": [[66, "PAMI.partialPeriodicFrequentPattern.basic.abstract.partialPeriodicPatterns.getPatterns"]], "getpatternsasdataframe() (pami.partialperiodicfrequentpattern.basic.abstract.partialperiodicpatterns method)": [[66, "PAMI.partialPeriodicFrequentPattern.basic.abstract.partialPeriodicPatterns.getPatternsAsDataFrame"]], "getruntime() (pami.partialperiodicfrequentpattern.basic.abstract.partialperiodicpatterns method)": [[66, "PAMI.partialPeriodicFrequentPattern.basic.abstract.partialPeriodicPatterns.getRuntime"]], "partialperiodicpatterns (class in pami.partialperiodicfrequentpattern.basic.abstract)": [[66, "PAMI.partialPeriodicFrequentPattern.basic.abstract.partialPeriodicPatterns"]], "printresults() (pami.partialperiodicfrequentpattern.basic.abstract.partialperiodicpatterns method)": [[66, "PAMI.partialPeriodicFrequentPattern.basic.abstract.partialPeriodicPatterns.printResults"]], "save() (pami.partialperiodicfrequentpattern.basic.abstract.partialperiodicpatterns method)": [[66, "PAMI.partialPeriodicFrequentPattern.basic.abstract.partialPeriodicPatterns.save"]], "startmine() (pami.partialperiodicfrequentpattern.basic.abstract.partialperiodicpatterns method)": [[66, "PAMI.partialPeriodicFrequentPattern.basic.abstract.partialPeriodicPatterns.startMine"]], "pami.partialperiodicpattern": [[67, "module-PAMI.partialPeriodicPattern"]], "mine() (pami.partialperiodicpattern.basic.ppp_eclat.ppp_eclat method)": [[68, "PAMI.partialPeriodicPattern.basic.PPP_ECLAT.PPP_ECLAT.Mine"]], "pami.partialperiodicpattern.basic": [[68, "module-PAMI.partialPeriodicPattern.basic"]], "pami.partialperiodicpattern.basic.gabstract": [[68, "module-PAMI.partialPeriodicPattern.basic.Gabstract"]], "pami.partialperiodicpattern.basic.pppgrowth": [[68, 
"module-PAMI.partialPeriodicPattern.basic.PPPGrowth"]], "pami.partialperiodicpattern.basic.ppp_eclat": [[68, "module-PAMI.partialPeriodicPattern.basic.PPP_ECLAT"]], "pami.partialperiodicpattern.basic.abstract": [[68, "module-PAMI.partialPeriodicPattern.basic.abstract"]], "pppgrowth (class in pami.partialperiodicpattern.basic.pppgrowth)": [[68, "PAMI.partialPeriodicPattern.basic.PPPGrowth.PPPGrowth"]], "ppp_eclat (class in pami.partialperiodicpattern.basic.ppp_eclat)": [[68, "PAMI.partialPeriodicPattern.basic.PPP_ECLAT.PPP_ECLAT"]], "getmemoryrss() (pami.partialperiodicpattern.basic.pppgrowth.pppgrowth method)": [[68, "PAMI.partialPeriodicPattern.basic.PPPGrowth.PPPGrowth.getMemoryRSS"]], "getmemoryrss() (pami.partialperiodicpattern.basic.ppp_eclat.ppp_eclat method)": [[68, "PAMI.partialPeriodicPattern.basic.PPP_ECLAT.PPP_ECLAT.getMemoryRSS"]], "getmemoryuss() (pami.partialperiodicpattern.basic.pppgrowth.pppgrowth method)": [[68, "PAMI.partialPeriodicPattern.basic.PPPGrowth.PPPGrowth.getMemoryUSS"]], "getmemoryuss() (pami.partialperiodicpattern.basic.ppp_eclat.ppp_eclat method)": [[68, "PAMI.partialPeriodicPattern.basic.PPP_ECLAT.PPP_ECLAT.getMemoryUSS"]], "getpatterns() (pami.partialperiodicpattern.basic.pppgrowth.pppgrowth method)": [[68, "PAMI.partialPeriodicPattern.basic.PPPGrowth.PPPGrowth.getPatterns"]], "getpatterns() (pami.partialperiodicpattern.basic.ppp_eclat.ppp_eclat method)": [[68, "PAMI.partialPeriodicPattern.basic.PPP_ECLAT.PPP_ECLAT.getPatterns"]], "getpatternsasdataframe() (pami.partialperiodicpattern.basic.pppgrowth.pppgrowth method)": [[68, "PAMI.partialPeriodicPattern.basic.PPPGrowth.PPPGrowth.getPatternsAsDataFrame"]], "getpatternsasdataframe() (pami.partialperiodicpattern.basic.ppp_eclat.ppp_eclat method)": [[68, "PAMI.partialPeriodicPattern.basic.PPP_ECLAT.PPP_ECLAT.getPatternsAsDataFrame"]], "getruntime() (pami.partialperiodicpattern.basic.pppgrowth.pppgrowth method)": [[68, "PAMI.partialPeriodicPattern.basic.PPPGrowth.PPPGrowth.getRuntime"]], "getruntime() (pami.partialperiodicpattern.basic.ppp_eclat.ppp_eclat method)": [[68, "PAMI.partialPeriodicPattern.basic.PPP_ECLAT.PPP_ECLAT.getRuntime"]], "mine() (pami.partialperiodicpattern.basic.pppgrowth.pppgrowth method)": [[68, "PAMI.partialPeriodicPattern.basic.PPPGrowth.PPPGrowth.mine"]], "printresults() (pami.partialperiodicpattern.basic.pppgrowth.pppgrowth method)": [[68, "PAMI.partialPeriodicPattern.basic.PPPGrowth.PPPGrowth.printResults"]], "printresults() (pami.partialperiodicpattern.basic.ppp_eclat.ppp_eclat method)": [[68, "PAMI.partialPeriodicPattern.basic.PPP_ECLAT.PPP_ECLAT.printResults"]], "save() (pami.partialperiodicpattern.basic.pppgrowth.pppgrowth method)": [[68, "PAMI.partialPeriodicPattern.basic.PPPGrowth.PPPGrowth.save"]], "save() (pami.partialperiodicpattern.basic.ppp_eclat.ppp_eclat method)": [[68, "PAMI.partialPeriodicPattern.basic.PPP_ECLAT.PPP_ECLAT.save"]], "startmine() (pami.partialperiodicpattern.basic.pppgrowth.pppgrowth method)": [[68, "PAMI.partialPeriodicPattern.basic.PPPGrowth.PPPGrowth.startMine"]], "startmine() (pami.partialperiodicpattern.basic.ppp_eclat.ppp_eclat method)": [[68, "PAMI.partialPeriodicPattern.basic.PPP_ECLAT.PPP_ECLAT.startMine"]], "pami.partialperiodicpattern.closed": [[69, "module-PAMI.partialPeriodicPattern.closed"]], "pami.partialperiodicpattern.closed.pppclose": [[69, "module-PAMI.partialPeriodicPattern.closed.PPPClose"]], "pami.partialperiodicpattern.closed.abstract": [[69, "module-PAMI.partialPeriodicPattern.closed.abstract"]], "pppclose (class in 
pami.partialperiodicpattern.closed.pppclose)": [[69, "PAMI.partialPeriodicPattern.closed.PPPClose.PPPClose"]], "getmemoryrss() (pami.partialperiodicpattern.closed.pppclose.pppclose method)": [[69, "PAMI.partialPeriodicPattern.closed.PPPClose.PPPClose.getMemoryRSS"]], "getmemoryuss() (pami.partialperiodicpattern.closed.pppclose.pppclose method)": [[69, "PAMI.partialPeriodicPattern.closed.PPPClose.PPPClose.getMemoryUSS"]], "getpatterns() (pami.partialperiodicpattern.closed.pppclose.pppclose method)": [[69, "PAMI.partialPeriodicPattern.closed.PPPClose.PPPClose.getPatterns"]], "getpatternsasdataframe() (pami.partialperiodicpattern.closed.pppclose.pppclose method)": [[69, "PAMI.partialPeriodicPattern.closed.PPPClose.PPPClose.getPatternsAsDataFrame"]], "getruntime() (pami.partialperiodicpattern.closed.pppclose.pppclose method)": [[69, "PAMI.partialPeriodicPattern.closed.PPPClose.PPPClose.getRuntime"]], "mine() (pami.partialperiodicpattern.closed.pppclose.pppclose method)": [[69, "PAMI.partialPeriodicPattern.closed.PPPClose.PPPClose.mine"]], "printresults() (pami.partialperiodicpattern.closed.pppclose.pppclose method)": [[69, "PAMI.partialPeriodicPattern.closed.PPPClose.PPPClose.printResults"]], "save() (pami.partialperiodicpattern.closed.pppclose.pppclose method)": [[69, "PAMI.partialPeriodicPattern.closed.PPPClose.PPPClose.save"]], "startmine() (pami.partialperiodicpattern.closed.pppclose.pppclose method)": [[69, "PAMI.partialPeriodicPattern.closed.PPPClose.PPPClose.startMine"]], "pami.partialperiodicpattern.maximal": [[70, "module-PAMI.partialPeriodicPattern.maximal"]], "pami.partialperiodicpattern.maximal.abstract": [[70, "module-PAMI.partialPeriodicPattern.maximal.abstract"]], "pami.partialperiodicpattern.pyspark": [[71, "module-PAMI.partialPeriodicPattern.pyspark"]], "pami.partialperiodicpattern.pyspark.abstract": [[71, "module-PAMI.partialPeriodicPattern.pyspark.abstract"]], "pami.partialperiodicpattern.topk": [[72, "module-PAMI.partialPeriodicPattern.topk"]], "pami.partialperiodicpattern.topk.abstract": [[72, "module-PAMI.partialPeriodicPattern.topk.abstract"]], "pami.partialperiodicpattern.topk.k3pminer": [[72, "module-PAMI.partialPeriodicPattern.topk.k3PMiner"]], "getmemoryrss() (pami.partialperiodicpattern.topk.abstract.partialperiodicpatterns method)": [[72, "PAMI.partialPeriodicPattern.topk.abstract.partialPeriodicPatterns.getMemoryRSS"]], "getmemoryrss() (pami.partialperiodicpattern.topk.k3pminer.k3pminer method)": [[72, "PAMI.partialPeriodicPattern.topk.k3PMiner.k3PMiner.getMemoryRSS"]], "getmemoryuss() (pami.partialperiodicpattern.topk.abstract.partialperiodicpatterns method)": [[72, "PAMI.partialPeriodicPattern.topk.abstract.partialPeriodicPatterns.getMemoryUSS"]], "getmemoryuss() (pami.partialperiodicpattern.topk.k3pminer.k3pminer method)": [[72, "PAMI.partialPeriodicPattern.topk.k3PMiner.k3PMiner.getMemoryUSS"]], "getpatterns() (pami.partialperiodicpattern.topk.abstract.partialperiodicpatterns method)": [[72, "PAMI.partialPeriodicPattern.topk.abstract.partialPeriodicPatterns.getPatterns"]], "getpatterns() (pami.partialperiodicpattern.topk.k3pminer.k3pminer method)": [[72, "PAMI.partialPeriodicPattern.topk.k3PMiner.k3PMiner.getPatterns"]], "getpatternsasdataframe() (pami.partialperiodicpattern.topk.abstract.partialperiodicpatterns method)": [[72, "PAMI.partialPeriodicPattern.topk.abstract.partialPeriodicPatterns.getPatternsAsDataFrame"]], "getpatternsasdataframe() (pami.partialperiodicpattern.topk.k3pminer.k3pminer method)": [[72, 
"PAMI.partialPeriodicPattern.topk.k3PMiner.k3PMiner.getPatternsAsDataFrame"]], "getruntime() (pami.partialperiodicpattern.topk.abstract.partialperiodicpatterns method)": [[72, "PAMI.partialPeriodicPattern.topk.abstract.partialPeriodicPatterns.getRuntime"]], "getruntime() (pami.partialperiodicpattern.topk.k3pminer.k3pminer method)": [[72, "PAMI.partialPeriodicPattern.topk.k3PMiner.k3PMiner.getRuntime"]], "k3pminer (class in pami.partialperiodicpattern.topk.k3pminer)": [[72, "PAMI.partialPeriodicPattern.topk.k3PMiner.k3PMiner"]], "mine() (pami.partialperiodicpattern.topk.k3pminer.k3pminer method)": [[72, "PAMI.partialPeriodicPattern.topk.k3PMiner.k3PMiner.mine"]], "partialperiodicpatterns (class in pami.partialperiodicpattern.topk.abstract)": [[72, "PAMI.partialPeriodicPattern.topk.abstract.partialPeriodicPatterns"]], "printresults() (pami.partialperiodicpattern.topk.abstract.partialperiodicpatterns method)": [[72, "PAMI.partialPeriodicPattern.topk.abstract.partialPeriodicPatterns.printResults"]], "printresults() (pami.partialperiodicpattern.topk.k3pminer.k3pminer method)": [[72, "PAMI.partialPeriodicPattern.topk.k3PMiner.k3PMiner.printResults"]], "save() (pami.partialperiodicpattern.topk.abstract.partialperiodicpatterns method)": [[72, "PAMI.partialPeriodicPattern.topk.abstract.partialPeriodicPatterns.save"]], "save() (pami.partialperiodicpattern.topk.k3pminer.k3pminer method)": [[72, "PAMI.partialPeriodicPattern.topk.k3PMiner.k3PMiner.save"]], "startmine() (pami.partialperiodicpattern.topk.abstract.partialperiodicpatterns method)": [[72, "PAMI.partialPeriodicPattern.topk.abstract.partialPeriodicPatterns.startMine"]], "startmine() (pami.partialperiodicpattern.topk.k3pminer.k3pminer method)": [[72, "PAMI.partialPeriodicPattern.topk.k3PMiner.k3PMiner.startMine"]], "mine() (pami.partialperiodicpatterninmultipletimeseries.ppgrowth.ppgrowth method)": [[73, "PAMI.partialPeriodicPatternInMultipleTimeSeries.PPGrowth.PPGrowth.Mine"]], "pami.partialperiodicpatterninmultipletimeseries": [[73, "module-PAMI.partialPeriodicPatternInMultipleTimeSeries"]], "pami.partialperiodicpatterninmultipletimeseries.ppgrowth": [[73, "module-PAMI.partialPeriodicPatternInMultipleTimeSeries.PPGrowth"]], "pami.partialperiodicpatterninmultipletimeseries.abstract": [[73, "module-PAMI.partialPeriodicPatternInMultipleTimeSeries.abstract"]], "ppgrowth (class in pami.partialperiodicpatterninmultipletimeseries.ppgrowth)": [[73, "PAMI.partialPeriodicPatternInMultipleTimeSeries.PPGrowth.PPGrowth"]], "getmemoryrss() (pami.partialperiodicpatterninmultipletimeseries.ppgrowth.ppgrowth method)": [[73, "PAMI.partialPeriodicPatternInMultipleTimeSeries.PPGrowth.PPGrowth.getMemoryRSS"]], "getmemoryuss() (pami.partialperiodicpatterninmultipletimeseries.ppgrowth.ppgrowth method)": [[73, "PAMI.partialPeriodicPatternInMultipleTimeSeries.PPGrowth.PPGrowth.getMemoryUSS"]], "getpatterns() (pami.partialperiodicpatterninmultipletimeseries.ppgrowth.ppgrowth method)": [[73, "PAMI.partialPeriodicPatternInMultipleTimeSeries.PPGrowth.PPGrowth.getPatterns"]], "getpatternsasdataframe() (pami.partialperiodicpatterninmultipletimeseries.ppgrowth.ppgrowth method)": [[73, "PAMI.partialPeriodicPatternInMultipleTimeSeries.PPGrowth.PPGrowth.getPatternsAsDataFrame"]], "getruntime() (pami.partialperiodicpatterninmultipletimeseries.ppgrowth.ppgrowth method)": [[73, "PAMI.partialPeriodicPatternInMultipleTimeSeries.PPGrowth.PPGrowth.getRuntime"]], "printresults() (pami.partialperiodicpatterninmultipletimeseries.ppgrowth.ppgrowth method)": [[73, 
"PAMI.partialPeriodicPatternInMultipleTimeSeries.PPGrowth.PPGrowth.printResults"]], "save() (pami.partialperiodicpatterninmultipletimeseries.ppgrowth.ppgrowth method)": [[73, "PAMI.partialPeriodicPatternInMultipleTimeSeries.PPGrowth.PPGrowth.save"]], "startmine() (pami.partialperiodicpatterninmultipletimeseries.ppgrowth.ppgrowth method)": [[73, "PAMI.partialPeriodicPatternInMultipleTimeSeries.PPGrowth.PPGrowth.startMine"]], "pami.periodiccorrelatedpattern": [[74, "module-PAMI.periodicCorrelatedPattern"]], "epcpgrowth (class in pami.periodiccorrelatedpattern.basic.epcpgrowth)": [[75, "PAMI.periodicCorrelatedPattern.basic.EPCPGrowth.EPCPGrowth"]], "pami.periodiccorrelatedpattern.basic": [[75, "module-PAMI.periodicCorrelatedPattern.basic"]], "pami.periodiccorrelatedpattern.basic.epcpgrowth": [[75, "module-PAMI.periodicCorrelatedPattern.basic.EPCPGrowth"]], "pami.periodiccorrelatedpattern.basic.abstract": [[75, "module-PAMI.periodicCorrelatedPattern.basic.abstract"]], "getmemoryrss() (pami.periodiccorrelatedpattern.basic.epcpgrowth.epcpgrowth method)": [[75, "PAMI.periodicCorrelatedPattern.basic.EPCPGrowth.EPCPGrowth.getMemoryRSS"]], "getmemoryuss() (pami.periodiccorrelatedpattern.basic.epcpgrowth.epcpgrowth method)": [[75, "PAMI.periodicCorrelatedPattern.basic.EPCPGrowth.EPCPGrowth.getMemoryUSS"]], "getpatterns() (pami.periodiccorrelatedpattern.basic.epcpgrowth.epcpgrowth method)": [[75, "PAMI.periodicCorrelatedPattern.basic.EPCPGrowth.EPCPGrowth.getPatterns"]], "getpatternsasdataframe() (pami.periodiccorrelatedpattern.basic.epcpgrowth.epcpgrowth method)": [[75, "PAMI.periodicCorrelatedPattern.basic.EPCPGrowth.EPCPGrowth.getPatternsAsDataFrame"]], "getruntime() (pami.periodiccorrelatedpattern.basic.epcpgrowth.epcpgrowth method)": [[75, "PAMI.periodicCorrelatedPattern.basic.EPCPGrowth.EPCPGrowth.getRuntime"]], "printresults() (pami.periodiccorrelatedpattern.basic.epcpgrowth.epcpgrowth method)": [[75, "PAMI.periodicCorrelatedPattern.basic.EPCPGrowth.EPCPGrowth.printResults"]], "save() (pami.periodiccorrelatedpattern.basic.epcpgrowth.epcpgrowth method)": [[75, "PAMI.periodicCorrelatedPattern.basic.EPCPGrowth.EPCPGrowth.save"]], "startmine() (pami.periodiccorrelatedpattern.basic.epcpgrowth.epcpgrowth method)": [[75, "PAMI.periodicCorrelatedPattern.basic.EPCPGrowth.EPCPGrowth.startMine"]], "pami.periodicfrequentpattern": [[76, "module-PAMI.periodicFrequentPattern"]], "mine() (pami.periodicfrequentpattern.basic.pfeclat.pfeclat method)": [[77, "PAMI.periodicFrequentPattern.basic.PFECLAT.PFECLAT.Mine"]], "mine() (pami.periodicfrequentpattern.basic.pfpgrowth.pfpgrowth method)": [[77, "PAMI.periodicFrequentPattern.basic.PFPGrowth.PFPGrowth.Mine"]], "mine() (pami.periodicfrequentpattern.basic.psgrowth.psgrowth method)": [[77, "PAMI.periodicFrequentPattern.basic.PSGrowth.PSGrowth.Mine"]], "node (class in pami.periodicfrequentpattern.basic.psgrowth)": [[77, "PAMI.periodicFrequentPattern.basic.PSGrowth.Node"]], "pami.periodicfrequentpattern.basic": [[77, "module-PAMI.periodicFrequentPattern.basic"]], "pami.periodicfrequentpattern.basic.pfeclat": [[77, "module-PAMI.periodicFrequentPattern.basic.PFECLAT"]], "pami.periodicfrequentpattern.basic.pfpgrowth": [[77, "module-PAMI.periodicFrequentPattern.basic.PFPGrowth"]], "pami.periodicfrequentpattern.basic.pfpgrowthplus": [[77, "module-PAMI.periodicFrequentPattern.basic.PFPGrowthPlus"]], "pami.periodicfrequentpattern.basic.pfpmc": [[77, "module-PAMI.periodicFrequentPattern.basic.PFPMC"]], "pami.periodicfrequentpattern.basic.psgrowth": [[77, 
"module-PAMI.periodicFrequentPattern.basic.PSGrowth"]], "pami.periodicfrequentpattern.basic.abstract": [[77, "module-PAMI.periodicFrequentPattern.basic.abstract"]], "pfeclat (class in pami.periodicfrequentpattern.basic.pfeclat)": [[77, "PAMI.periodicFrequentPattern.basic.PFECLAT.PFECLAT"]], "pfpgrowth (class in pami.periodicfrequentpattern.basic.pfpgrowth)": [[77, "PAMI.periodicFrequentPattern.basic.PFPGrowth.PFPGrowth"]], "pfpgrowthplus (class in pami.periodicfrequentpattern.basic.pfpgrowthplus)": [[77, "PAMI.periodicFrequentPattern.basic.PFPGrowthPlus.PFPGrowthPlus"]], "pfpmc (class in pami.periodicfrequentpattern.basic.pfpmc)": [[77, "PAMI.periodicFrequentPattern.basic.PFPMC.PFPMC"]], "psgrowth (class in pami.periodicfrequentpattern.basic.psgrowth)": [[77, "PAMI.periodicFrequentPattern.basic.PSGrowth.PSGrowth"]], "addchild() (pami.periodicfrequentpattern.basic.psgrowth.node method)": [[77, "PAMI.periodicFrequentPattern.basic.PSGrowth.Node.addChild"]], "conditionaltransactions() (in module pami.periodicfrequentpattern.basic.psgrowth)": [[77, "PAMI.periodicFrequentPattern.basic.PSGrowth.conditionalTransactions"]], "getmemoryrss() (pami.periodicfrequentpattern.basic.pfeclat.pfeclat method)": [[77, "PAMI.periodicFrequentPattern.basic.PFECLAT.PFECLAT.getMemoryRSS"]], "getmemoryrss() (pami.periodicfrequentpattern.basic.pfpgrowth.pfpgrowth method)": [[77, "PAMI.periodicFrequentPattern.basic.PFPGrowth.PFPGrowth.getMemoryRSS"]], "getmemoryrss() (pami.periodicfrequentpattern.basic.pfpgrowthplus.pfpgrowthplus method)": [[77, "PAMI.periodicFrequentPattern.basic.PFPGrowthPlus.PFPGrowthPlus.getMemoryRSS"]], "getmemoryrss() (pami.periodicfrequentpattern.basic.pfpmc.pfpmc method)": [[77, "PAMI.periodicFrequentPattern.basic.PFPMC.PFPMC.getMemoryRSS"]], "getmemoryrss() (pami.periodicfrequentpattern.basic.psgrowth.psgrowth method)": [[77, "PAMI.periodicFrequentPattern.basic.PSGrowth.PSGrowth.getMemoryRSS"]], "getmemoryuss() (pami.periodicfrequentpattern.basic.pfeclat.pfeclat method)": [[77, "PAMI.periodicFrequentPattern.basic.PFECLAT.PFECLAT.getMemoryUSS"]], "getmemoryuss() (pami.periodicfrequentpattern.basic.pfpgrowth.pfpgrowth method)": [[77, "PAMI.periodicFrequentPattern.basic.PFPGrowth.PFPGrowth.getMemoryUSS"]], "getmemoryuss() (pami.periodicfrequentpattern.basic.pfpgrowthplus.pfpgrowthplus method)": [[77, "PAMI.periodicFrequentPattern.basic.PFPGrowthPlus.PFPGrowthPlus.getMemoryUSS"]], "getmemoryuss() (pami.periodicfrequentpattern.basic.pfpmc.pfpmc method)": [[77, "PAMI.periodicFrequentPattern.basic.PFPMC.PFPMC.getMemoryUSS"]], "getmemoryuss() (pami.periodicfrequentpattern.basic.psgrowth.psgrowth method)": [[77, "PAMI.periodicFrequentPattern.basic.PSGrowth.PSGrowth.getMemoryUSS"]], "getpatterns() (pami.periodicfrequentpattern.basic.pfeclat.pfeclat method)": [[77, "PAMI.periodicFrequentPattern.basic.PFECLAT.PFECLAT.getPatterns"]], "getpatterns() (pami.periodicfrequentpattern.basic.pfpgrowth.pfpgrowth method)": [[77, "PAMI.periodicFrequentPattern.basic.PFPGrowth.PFPGrowth.getPatterns"]], "getpatterns() (pami.periodicfrequentpattern.basic.pfpgrowthplus.pfpgrowthplus method)": [[77, "PAMI.periodicFrequentPattern.basic.PFPGrowthPlus.PFPGrowthPlus.getPatterns"]], "getpatterns() (pami.periodicfrequentpattern.basic.pfpmc.pfpmc method)": [[77, "PAMI.periodicFrequentPattern.basic.PFPMC.PFPMC.getPatterns"]], "getpatterns() (pami.periodicfrequentpattern.basic.psgrowth.psgrowth method)": [[77, "PAMI.periodicFrequentPattern.basic.PSGrowth.PSGrowth.getPatterns"]], "getpatternsasdataframe() 
(pami.periodicfrequentpattern.basic.pfeclat.pfeclat method)": [[77, "PAMI.periodicFrequentPattern.basic.PFECLAT.PFECLAT.getPatternsAsDataFrame"]], "getpatternsasdataframe() (pami.periodicfrequentpattern.basic.pfpgrowth.pfpgrowth method)": [[77, "PAMI.periodicFrequentPattern.basic.PFPGrowth.PFPGrowth.getPatternsAsDataFrame"]], "getpatternsasdataframe() (pami.periodicfrequentpattern.basic.pfpgrowthplus.pfpgrowthplus method)": [[77, "PAMI.periodicFrequentPattern.basic.PFPGrowthPlus.PFPGrowthPlus.getPatternsAsDataFrame"]], "getpatternsasdataframe() (pami.periodicfrequentpattern.basic.pfpmc.pfpmc method)": [[77, "PAMI.periodicFrequentPattern.basic.PFPMC.PFPMC.getPatternsAsDataFrame"]], "getpatternsasdataframe() (pami.periodicfrequentpattern.basic.psgrowth.psgrowth method)": [[77, "PAMI.periodicFrequentPattern.basic.PSGrowth.PSGrowth.getPatternsAsDataFrame"]], "getperiodandsupport() (in module pami.periodicfrequentpattern.basic.psgrowth)": [[77, "PAMI.periodicFrequentPattern.basic.PSGrowth.getPeriodAndSupport"]], "getruntime() (pami.periodicfrequentpattern.basic.pfeclat.pfeclat method)": [[77, "PAMI.periodicFrequentPattern.basic.PFECLAT.PFECLAT.getRuntime"]], "getruntime() (pami.periodicfrequentpattern.basic.pfpgrowth.pfpgrowth method)": [[77, "PAMI.periodicFrequentPattern.basic.PFPGrowth.PFPGrowth.getRuntime"]], "getruntime() (pami.periodicfrequentpattern.basic.pfpgrowthplus.pfpgrowthplus method)": [[77, "PAMI.periodicFrequentPattern.basic.PFPGrowthPlus.PFPGrowthPlus.getRuntime"]], "getruntime() (pami.periodicfrequentpattern.basic.pfpmc.pfpmc method)": [[77, "PAMI.periodicFrequentPattern.basic.PFPMC.PFPMC.getRuntime"]], "getruntime() (pami.periodicfrequentpattern.basic.psgrowth.psgrowth method)": [[77, "PAMI.periodicFrequentPattern.basic.PSGrowth.PSGrowth.getRuntime"]], "printresults() (pami.periodicfrequentpattern.basic.pfeclat.pfeclat method)": [[77, "PAMI.periodicFrequentPattern.basic.PFECLAT.PFECLAT.printResults"]], "printresults() (pami.periodicfrequentpattern.basic.pfpgrowth.pfpgrowth method)": [[77, "PAMI.periodicFrequentPattern.basic.PFPGrowth.PFPGrowth.printResults"]], "printresults() (pami.periodicfrequentpattern.basic.pfpgrowthplus.pfpgrowthplus method)": [[77, "PAMI.periodicFrequentPattern.basic.PFPGrowthPlus.PFPGrowthPlus.printResults"]], "printresults() (pami.periodicfrequentpattern.basic.pfpmc.pfpmc method)": [[77, "PAMI.periodicFrequentPattern.basic.PFPMC.PFPMC.printResults"]], "printresults() (pami.periodicfrequentpattern.basic.psgrowth.psgrowth method)": [[77, "PAMI.periodicFrequentPattern.basic.PSGrowth.PSGrowth.printResults"]], "save() (pami.periodicfrequentpattern.basic.pfeclat.pfeclat method)": [[77, "PAMI.periodicFrequentPattern.basic.PFECLAT.PFECLAT.save"]], "save() (pami.periodicfrequentpattern.basic.pfpgrowth.pfpgrowth method)": [[77, "PAMI.periodicFrequentPattern.basic.PFPGrowth.PFPGrowth.save"]], "save() (pami.periodicfrequentpattern.basic.pfpgrowthplus.pfpgrowthplus method)": [[77, "PAMI.periodicFrequentPattern.basic.PFPGrowthPlus.PFPGrowthPlus.save"]], "save() (pami.periodicfrequentpattern.basic.pfpmc.pfpmc method)": [[77, "PAMI.periodicFrequentPattern.basic.PFPMC.PFPMC.save"]], "save() (pami.periodicfrequentpattern.basic.psgrowth.psgrowth method)": [[77, "PAMI.periodicFrequentPattern.basic.PSGrowth.PSGrowth.save"]], "startmine() (pami.periodicfrequentpattern.basic.pfeclat.pfeclat method)": [[77, "PAMI.periodicFrequentPattern.basic.PFECLAT.PFECLAT.startMine"]], "startmine() (pami.periodicfrequentpattern.basic.pfpgrowth.pfpgrowth method)": [[77, 
"PAMI.periodicFrequentPattern.basic.PFPGrowth.PFPGrowth.startMine"]], "startmine() (pami.periodicfrequentpattern.basic.pfpgrowthplus.pfpgrowthplus method)": [[77, "PAMI.periodicFrequentPattern.basic.PFPGrowthPlus.PFPGrowthPlus.startMine"]], "startmine() (pami.periodicfrequentpattern.basic.pfpmc.pfpmc method)": [[77, "PAMI.periodicFrequentPattern.basic.PFPMC.PFPMC.startMine"]], "startmine() (pami.periodicfrequentpattern.basic.psgrowth.psgrowth method)": [[77, "PAMI.periodicFrequentPattern.basic.PSGrowth.PSGrowth.startMine"]], "cpfpminer (class in pami.periodicfrequentpattern.closed.cpfpminer)": [[78, "PAMI.periodicFrequentPattern.closed.CPFPMiner.CPFPMiner"]], "mine() (pami.periodicfrequentpattern.closed.cpfpminer.cpfpminer method)": [[78, "PAMI.periodicFrequentPattern.closed.CPFPMiner.CPFPMiner.Mine"]], "pami.periodicfrequentpattern.closed": [[78, "module-PAMI.periodicFrequentPattern.closed"]], "pami.periodicfrequentpattern.closed.cpfpminer": [[78, "module-PAMI.periodicFrequentPattern.closed.CPFPMiner"]], "pami.periodicfrequentpattern.closed.abstract": [[78, "module-PAMI.periodicFrequentPattern.closed.abstract"]], "getmemoryrss() (pami.periodicfrequentpattern.closed.cpfpminer.cpfpminer method)": [[78, "PAMI.periodicFrequentPattern.closed.CPFPMiner.CPFPMiner.getMemoryRSS"]], "getmemoryuss() (pami.periodicfrequentpattern.closed.cpfpminer.cpfpminer method)": [[78, "PAMI.periodicFrequentPattern.closed.CPFPMiner.CPFPMiner.getMemoryUSS"]], "getpatterns() (pami.periodicfrequentpattern.closed.cpfpminer.cpfpminer method)": [[78, "PAMI.periodicFrequentPattern.closed.CPFPMiner.CPFPMiner.getPatterns"]], "getpatternsasdataframe() (pami.periodicfrequentpattern.closed.cpfpminer.cpfpminer method)": [[78, "PAMI.periodicFrequentPattern.closed.CPFPMiner.CPFPMiner.getPatternsAsDataFrame"]], "getruntime() (pami.periodicfrequentpattern.closed.cpfpminer.cpfpminer method)": [[78, "PAMI.periodicFrequentPattern.closed.CPFPMiner.CPFPMiner.getRuntime"]], "printresults() (pami.periodicfrequentpattern.closed.cpfpminer.cpfpminer method)": [[78, "PAMI.periodicFrequentPattern.closed.CPFPMiner.CPFPMiner.printResults"]], "save() (pami.periodicfrequentpattern.closed.cpfpminer.cpfpminer method)": [[78, "PAMI.periodicFrequentPattern.closed.CPFPMiner.CPFPMiner.save"]], "startmine() (pami.periodicfrequentpattern.closed.cpfpminer.cpfpminer method)": [[78, "PAMI.periodicFrequentPattern.closed.CPFPMiner.CPFPMiner.startMine"]], "pami.periodicfrequentpattern.cuda": [[79, "module-PAMI.periodicFrequentPattern.cuda"]], "maxpfgrowth (class in pami.periodicfrequentpattern.maximal.maxpfgrowth)": [[80, "PAMI.periodicFrequentPattern.maximal.MaxPFGrowth.MaxPFGrowth"]], "mine() (pami.periodicfrequentpattern.maximal.maxpfgrowth.maxpfgrowth method)": [[80, "PAMI.periodicFrequentPattern.maximal.MaxPFGrowth.MaxPFGrowth.Mine"]], "pami.periodicfrequentpattern.maximal": [[80, "module-PAMI.periodicFrequentPattern.maximal"]], "pami.periodicfrequentpattern.maximal.maxpfgrowth": [[80, "module-PAMI.periodicFrequentPattern.maximal.MaxPFGrowth"]], "pami.periodicfrequentpattern.maximal.abstract": [[80, "module-PAMI.periodicFrequentPattern.maximal.abstract"]], "getmemoryrss() (pami.periodicfrequentpattern.maximal.maxpfgrowth.maxpfgrowth method)": [[80, "PAMI.periodicFrequentPattern.maximal.MaxPFGrowth.MaxPFGrowth.getMemoryRSS"]], "getmemoryuss() (pami.periodicfrequentpattern.maximal.maxpfgrowth.maxpfgrowth method)": [[80, "PAMI.periodicFrequentPattern.maximal.MaxPFGrowth.MaxPFGrowth.getMemoryUSS"]], "getpatterns() 
(pami.periodicfrequentpattern.maximal.maxpfgrowth.maxpfgrowth method)": [[80, "PAMI.periodicFrequentPattern.maximal.MaxPFGrowth.MaxPFGrowth.getPatterns"]], "getpatternsasdataframe() (pami.periodicfrequentpattern.maximal.maxpfgrowth.maxpfgrowth method)": [[80, "PAMI.periodicFrequentPattern.maximal.MaxPFGrowth.MaxPFGrowth.getPatternsAsDataFrame"]], "getruntime() (pami.periodicfrequentpattern.maximal.maxpfgrowth.maxpfgrowth method)": [[80, "PAMI.periodicFrequentPattern.maximal.MaxPFGrowth.MaxPFGrowth.getRuntime"]], "printresults() (pami.periodicfrequentpattern.maximal.maxpfgrowth.maxpfgrowth method)": [[80, "PAMI.periodicFrequentPattern.maximal.MaxPFGrowth.MaxPFGrowth.printResults"]], "save() (pami.periodicfrequentpattern.maximal.maxpfgrowth.maxpfgrowth method)": [[80, "PAMI.periodicFrequentPattern.maximal.MaxPFGrowth.MaxPFGrowth.save"]], "startmine() (pami.periodicfrequentpattern.maximal.maxpfgrowth.maxpfgrowth method)": [[80, "PAMI.periodicFrequentPattern.maximal.MaxPFGrowth.MaxPFGrowth.startMine"]], "pami.periodicfrequentpattern.pyspark": [[81, "module-PAMI.periodicFrequentPattern.pyspark"]], "pami.periodicfrequentpattern.topk": [[82, "module-PAMI.periodicFrequentPattern.topk"]], "mine() (pami.periodicfrequentpattern.topk.topkpfp.topkpfp.topkpfpgrowth method)": [[83, "PAMI.periodicFrequentPattern.topk.TopkPFP.TopkPFP.TopkPFPGrowth.Mine"]], "pami.periodicfrequentpattern.topk.topkpfp": [[83, "module-PAMI.periodicFrequentPattern.topk.TopkPFP"]], "pami.periodicfrequentpattern.topk.topkpfp.topkpfp": [[83, "module-PAMI.periodicFrequentPattern.topk.TopkPFP.TopkPFP"]], "pami.periodicfrequentpattern.topk.topkpfp.abstract": [[83, "module-PAMI.periodicFrequentPattern.topk.TopkPFP.abstract"]], "topkpfpgrowth (class in pami.periodicfrequentpattern.topk.topkpfp.topkpfp)": [[83, "PAMI.periodicFrequentPattern.topk.TopkPFP.TopkPFP.TopkPFPGrowth"]], "getmemoryrss() (pami.periodicfrequentpattern.topk.topkpfp.topkpfp.topkpfpgrowth method)": [[83, "PAMI.periodicFrequentPattern.topk.TopkPFP.TopkPFP.TopkPFPGrowth.getMemoryRSS"]], "getmemoryuss() (pami.periodicfrequentpattern.topk.topkpfp.topkpfp.topkpfpgrowth method)": [[83, "PAMI.periodicFrequentPattern.topk.TopkPFP.TopkPFP.TopkPFPGrowth.getMemoryUSS"]], "getpatterns() (pami.periodicfrequentpattern.topk.topkpfp.topkpfp.topkpfpgrowth method)": [[83, "PAMI.periodicFrequentPattern.topk.TopkPFP.TopkPFP.TopkPFPGrowth.getPatterns"]], "getpatternsasdataframe() (pami.periodicfrequentpattern.topk.topkpfp.topkpfp.topkpfpgrowth method)": [[83, "PAMI.periodicFrequentPattern.topk.TopkPFP.TopkPFP.TopkPFPGrowth.getPatternsAsDataFrame"]], "getruntime() (pami.periodicfrequentpattern.topk.topkpfp.topkpfp.topkpfpgrowth method)": [[83, "PAMI.periodicFrequentPattern.topk.TopkPFP.TopkPFP.TopkPFPGrowth.getRuntime"]], "printresults() (pami.periodicfrequentpattern.topk.topkpfp.topkpfp.topkpfpgrowth method)": [[83, "PAMI.periodicFrequentPattern.topk.TopkPFP.TopkPFP.TopkPFPGrowth.printResults"]], "save() (pami.periodicfrequentpattern.topk.topkpfp.topkpfp.topkpfpgrowth method)": [[83, "PAMI.periodicFrequentPattern.topk.TopkPFP.TopkPFP.TopkPFPGrowth.save"]], "startmine() (pami.periodicfrequentpattern.topk.topkpfp.topkpfp.topkpfpgrowth method)": [[83, "PAMI.periodicFrequentPattern.topk.TopkPFP.TopkPFP.TopkPFPGrowth.startMine"]], "pami.periodicfrequentpattern.topk.kpfpminer": [[84, "module-PAMI.periodicFrequentPattern.topk.kPFPMiner"]], "pami.periodicfrequentpattern.topk.kpfpminer.abstract": [[84, "module-PAMI.periodicFrequentPattern.topk.kPFPMiner.abstract"]], 
"pami.periodicfrequentpattern.topk.kpfpminer.kpfpminer": [[84, "module-PAMI.periodicFrequentPattern.topk.kPFPMiner.kPFPMiner"]], "getmemoryrss() (pami.periodicfrequentpattern.topk.kpfpminer.kpfpminer.kpfpminer method)": [[84, "PAMI.periodicFrequentPattern.topk.kPFPMiner.kPFPMiner.kPFPMiner.getMemoryRSS"]], "getmemoryuss() (pami.periodicfrequentpattern.topk.kpfpminer.kpfpminer.kpfpminer method)": [[84, "PAMI.periodicFrequentPattern.topk.kPFPMiner.kPFPMiner.kPFPMiner.getMemoryUSS"]], "getpatterns() (pami.periodicfrequentpattern.topk.kpfpminer.kpfpminer.kpfpminer method)": [[84, "PAMI.periodicFrequentPattern.topk.kPFPMiner.kPFPMiner.kPFPMiner.getPatterns"]], "getpatternsasdataframe() (pami.periodicfrequentpattern.topk.kpfpminer.kpfpminer.kpfpminer method)": [[84, "PAMI.periodicFrequentPattern.topk.kPFPMiner.kPFPMiner.kPFPMiner.getPatternsAsDataFrame"]], "getper_sup() (pami.periodicfrequentpattern.topk.kpfpminer.kpfpminer.kpfpminer method)": [[84, "PAMI.periodicFrequentPattern.topk.kPFPMiner.kPFPMiner.kPFPMiner.getPer_Sup"]], "getruntime() (pami.periodicfrequentpattern.topk.kpfpminer.kpfpminer.kpfpminer method)": [[84, "PAMI.periodicFrequentPattern.topk.kPFPMiner.kPFPMiner.kPFPMiner.getRuntime"]], "kpfpminer (class in pami.periodicfrequentpattern.topk.kpfpminer.kpfpminer)": [[84, "PAMI.periodicFrequentPattern.topk.kPFPMiner.kPFPMiner.kPFPMiner"]], "lno (pami.periodicfrequentpattern.topk.kpfpminer.kpfpminer.kpfpminer attribute)": [[84, "PAMI.periodicFrequentPattern.topk.kPFPMiner.kPFPMiner.kPFPMiner.lno"]], "printresults() (pami.periodicfrequentpattern.topk.kpfpminer.kpfpminer.kpfpminer method)": [[84, "PAMI.periodicFrequentPattern.topk.kPFPMiner.kPFPMiner.kPFPMiner.printResults"]], "save() (pami.periodicfrequentpattern.topk.kpfpminer.kpfpminer.kpfpminer method)": [[84, "PAMI.periodicFrequentPattern.topk.kPFPMiner.kPFPMiner.kPFPMiner.save"]], "startmine() (pami.periodicfrequentpattern.topk.kpfpminer.kpfpminer.kpfpminer method)": [[84, "PAMI.periodicFrequentPattern.topk.kPFPMiner.kPFPMiner.kPFPMiner.startMine"]], "pami.recurringpattern": [[85, "module-PAMI.recurringPattern"]], "mine() (pami.recurringpattern.basic.rpgrowth.rpgrowth method)": [[86, "PAMI.recurringPattern.basic.RPGrowth.RPGrowth.Mine"]], "pami.recurringpattern.basic": [[86, "module-PAMI.recurringPattern.basic"]], "pami.recurringpattern.basic.rpgrowth": [[86, "module-PAMI.recurringPattern.basic.RPGrowth"]], "pami.recurringpattern.basic.abstract": [[86, "module-PAMI.recurringPattern.basic.abstract"]], "rpgrowth (class in pami.recurringpattern.basic.rpgrowth)": [[86, "PAMI.recurringPattern.basic.RPGrowth.RPGrowth"]], "getmemoryrss() (pami.recurringpattern.basic.rpgrowth.rpgrowth method)": [[86, "PAMI.recurringPattern.basic.RPGrowth.RPGrowth.getMemoryRSS"]], "getmemoryuss() (pami.recurringpattern.basic.rpgrowth.rpgrowth method)": [[86, "PAMI.recurringPattern.basic.RPGrowth.RPGrowth.getMemoryUSS"]], "getpatterns() (pami.recurringpattern.basic.rpgrowth.rpgrowth method)": [[86, "PAMI.recurringPattern.basic.RPGrowth.RPGrowth.getPatterns"]], "getpatternsasdataframe() (pami.recurringpattern.basic.rpgrowth.rpgrowth method)": [[86, "PAMI.recurringPattern.basic.RPGrowth.RPGrowth.getPatternsAsDataFrame"]], "getruntime() (pami.recurringpattern.basic.rpgrowth.rpgrowth method)": [[86, "PAMI.recurringPattern.basic.RPGrowth.RPGrowth.getRuntime"]], "printresults() (pami.recurringpattern.basic.rpgrowth.rpgrowth method)": [[86, "PAMI.recurringPattern.basic.RPGrowth.RPGrowth.printResults"]], "save() (pami.recurringpattern.basic.rpgrowth.rpgrowth 
method)": [[86, "PAMI.recurringPattern.basic.RPGrowth.RPGrowth.save"]], "startmine() (pami.recurringpattern.basic.rpgrowth.rpgrowth method)": [[86, "PAMI.recurringPattern.basic.RPGrowth.RPGrowth.startMine"]], "pami.relativefrequentpattern": [[87, "module-PAMI.relativeFrequentPattern"]], "mine() (pami.relativefrequentpattern.basic.rsfpgrowth.rsfpgrowth method)": [[88, "PAMI.relativeFrequentPattern.basic.RSFPGrowth.RSFPGrowth.Mine"]], "pami.relativefrequentpattern.basic": [[88, "module-PAMI.relativeFrequentPattern.basic"]], "pami.relativefrequentpattern.basic.rsfpgrowth": [[88, "module-PAMI.relativeFrequentPattern.basic.RSFPGrowth"]], "pami.relativefrequentpattern.basic.abstract": [[88, "module-PAMI.relativeFrequentPattern.basic.abstract"]], "rsfpgrowth (class in pami.relativefrequentpattern.basic.rsfpgrowth)": [[88, "PAMI.relativeFrequentPattern.basic.RSFPGrowth.RSFPGrowth"]], "getmemoryrss() (pami.relativefrequentpattern.basic.rsfpgrowth.rsfpgrowth method)": [[88, "PAMI.relativeFrequentPattern.basic.RSFPGrowth.RSFPGrowth.getMemoryRSS"]], "getmemoryuss() (pami.relativefrequentpattern.basic.rsfpgrowth.rsfpgrowth method)": [[88, "PAMI.relativeFrequentPattern.basic.RSFPGrowth.RSFPGrowth.getMemoryUSS"]], "getpatterns() (pami.relativefrequentpattern.basic.rsfpgrowth.rsfpgrowth method)": [[88, "PAMI.relativeFrequentPattern.basic.RSFPGrowth.RSFPGrowth.getPatterns"]], "getpatternsasdataframe() (pami.relativefrequentpattern.basic.rsfpgrowth.rsfpgrowth method)": [[88, "PAMI.relativeFrequentPattern.basic.RSFPGrowth.RSFPGrowth.getPatternsAsDataFrame"]], "getruntime() (pami.relativefrequentpattern.basic.rsfpgrowth.rsfpgrowth method)": [[88, "PAMI.relativeFrequentPattern.basic.RSFPGrowth.RSFPGrowth.getRuntime"]], "printresults() (pami.relativefrequentpattern.basic.rsfpgrowth.rsfpgrowth method)": [[88, "PAMI.relativeFrequentPattern.basic.RSFPGrowth.RSFPGrowth.printResults"]], "save() (pami.relativefrequentpattern.basic.rsfpgrowth.rsfpgrowth method)": [[88, "PAMI.relativeFrequentPattern.basic.RSFPGrowth.RSFPGrowth.save"]], "startmine() (pami.relativefrequentpattern.basic.rsfpgrowth.rsfpgrowth method)": [[88, "PAMI.relativeFrequentPattern.basic.RSFPGrowth.RSFPGrowth.startMine"]], "pami.relativehighutilitypattern": [[89, "module-PAMI.relativeHighUtilityPattern"]], "pami.relativehighutilitypattern.basic": [[90, "module-PAMI.relativeHighUtilityPattern.basic"]], "pami.relativehighutilitypattern.basic.rhuim": [[90, "module-PAMI.relativeHighUtilityPattern.basic.RHUIM"]], "pami.relativehighutilitypattern.basic.abstract": [[90, "module-PAMI.relativeHighUtilityPattern.basic.abstract"]], "rhuim (class in pami.relativehighutilitypattern.basic.rhuim)": [[90, "PAMI.relativeHighUtilityPattern.basic.RHUIM.RHUIM"]], "getmemoryrss() (pami.relativehighutilitypattern.basic.rhuim.rhuim method)": [[90, "PAMI.relativeHighUtilityPattern.basic.RHUIM.RHUIM.getMemoryRSS"]], "getmemoryuss() (pami.relativehighutilitypattern.basic.rhuim.rhuim method)": [[90, "PAMI.relativeHighUtilityPattern.basic.RHUIM.RHUIM.getMemoryUSS"]], "getpatterns() (pami.relativehighutilitypattern.basic.rhuim.rhuim method)": [[90, "PAMI.relativeHighUtilityPattern.basic.RHUIM.RHUIM.getPatterns"]], "getpatternsasdataframe() (pami.relativehighutilitypattern.basic.rhuim.rhuim method)": [[90, "PAMI.relativeHighUtilityPattern.basic.RHUIM.RHUIM.getPatternsAsDataFrame"]], "getruntime() (pami.relativehighutilitypattern.basic.rhuim.rhuim method)": [[90, "PAMI.relativeHighUtilityPattern.basic.RHUIM.RHUIM.getRuntime"]], "printresults() 
(pami.relativehighutilitypattern.basic.rhuim.rhuim method)": [[90, "PAMI.relativeHighUtilityPattern.basic.RHUIM.RHUIM.printResults"]], "save() (pami.relativehighutilitypattern.basic.rhuim.rhuim method)": [[90, "PAMI.relativeHighUtilityPattern.basic.RHUIM.RHUIM.save"]], "sortdatabase() (pami.relativehighutilitypattern.basic.rhuim.rhuim method)": [[90, "PAMI.relativeHighUtilityPattern.basic.RHUIM.RHUIM.sortDatabase"]], "sort_transaction() (pami.relativehighutilitypattern.basic.rhuim.rhuim method)": [[90, "PAMI.relativeHighUtilityPattern.basic.RHUIM.RHUIM.sort_transaction"]], "startmine() (pami.relativehighutilitypattern.basic.rhuim.rhuim method)": [[90, "PAMI.relativeHighUtilityPattern.basic.RHUIM.RHUIM.startMine"]], "pami.sequence": [[91, "module-PAMI.sequence"]], "pami.sequentialpatternmining": [[92, "module-PAMI.sequentialPatternMining"]], "dfspruning() (pami.sequentialpatternmining.basic.spam.spam method)": [[93, "PAMI.sequentialPatternMining.basic.SPAM.SPAM.DfsPruning"]], "mine() (pami.sequentialpatternmining.basic.spade.spade method)": [[93, "PAMI.sequentialPatternMining.basic.SPADE.SPADE.Mine"]], "mine() (pami.sequentialpatternmining.basic.prefixspan.prefixspan method)": [[93, "PAMI.sequentialPatternMining.basic.prefixSpan.prefixSpan.Mine"]], "pami.sequentialpatternmining.basic": [[93, "module-PAMI.sequentialPatternMining.basic"]], "pami.sequentialpatternmining.basic.spade": [[93, "module-PAMI.sequentialPatternMining.basic.SPADE"]], "pami.sequentialpatternmining.basic.spam": [[93, "module-PAMI.sequentialPatternMining.basic.SPAM"]], "pami.sequentialpatternmining.basic.abstract": [[93, "module-PAMI.sequentialPatternMining.basic.abstract"]], "pami.sequentialpatternmining.basic.prefixspan": [[93, "module-PAMI.sequentialPatternMining.basic.prefixSpan"]], "spade (class in pami.sequentialpatternmining.basic.spade)": [[93, "PAMI.sequentialPatternMining.basic.SPADE.SPADE"]], "spam (class in pami.sequentialpatternmining.basic.spam)": [[93, "PAMI.sequentialPatternMining.basic.SPAM.SPAM"]], "sstep() (pami.sequentialpatternmining.basic.spam.spam method)": [[93, "PAMI.sequentialPatternMining.basic.SPAM.SPAM.Sstep"]], "countsup() (pami.sequentialpatternmining.basic.spam.spam method)": [[93, "PAMI.sequentialPatternMining.basic.SPAM.SPAM.countSup"]], "getmemoryrss() (pami.sequentialpatternmining.basic.spade.spade method)": [[93, "PAMI.sequentialPatternMining.basic.SPADE.SPADE.getMemoryRSS"]], "getmemoryrss() (pami.sequentialpatternmining.basic.spam.spam method)": [[93, "PAMI.sequentialPatternMining.basic.SPAM.SPAM.getMemoryRSS"]], "getmemoryrss() (pami.sequentialpatternmining.basic.prefixspan.prefixspan method)": [[93, "PAMI.sequentialPatternMining.basic.prefixSpan.prefixSpan.getMemoryRSS"]], "getmemoryuss() (pami.sequentialpatternmining.basic.spade.spade method)": [[93, "PAMI.sequentialPatternMining.basic.SPADE.SPADE.getMemoryUSS"]], "getmemoryuss() (pami.sequentialpatternmining.basic.spam.spam method)": [[93, "PAMI.sequentialPatternMining.basic.SPAM.SPAM.getMemoryUSS"]], "getmemoryuss() (pami.sequentialpatternmining.basic.prefixspan.prefixspan method)": [[93, "PAMI.sequentialPatternMining.basic.prefixSpan.prefixSpan.getMemoryUSS"]], "getpatterns() (pami.sequentialpatternmining.basic.spade.spade method)": [[93, "PAMI.sequentialPatternMining.basic.SPADE.SPADE.getPatterns"]], "getpatterns() (pami.sequentialpatternmining.basic.spam.spam method)": [[93, "PAMI.sequentialPatternMining.basic.SPAM.SPAM.getPatterns"]], "getpatterns() (pami.sequentialpatternmining.basic.prefixspan.prefixspan method)": [[93, 
"PAMI.sequentialPatternMining.basic.prefixSpan.prefixSpan.getPatterns"]], "getpatternsasdataframe() (pami.sequentialpatternmining.basic.spade.spade method)": [[93, "PAMI.sequentialPatternMining.basic.SPADE.SPADE.getPatternsAsDataFrame"]], "getpatternsasdataframe() (pami.sequentialpatternmining.basic.spam.spam method)": [[93, "PAMI.sequentialPatternMining.basic.SPAM.SPAM.getPatternsAsDataFrame"]], "getpatternsasdataframe() (pami.sequentialpatternmining.basic.prefixspan.prefixspan method)": [[93, "PAMI.sequentialPatternMining.basic.prefixSpan.prefixSpan.getPatternsAsDataFrame"]], "getruntime() (pami.sequentialpatternmining.basic.spade.spade method)": [[93, "PAMI.sequentialPatternMining.basic.SPADE.SPADE.getRuntime"]], "getruntime() (pami.sequentialpatternmining.basic.spam.spam method)": [[93, "PAMI.sequentialPatternMining.basic.SPAM.SPAM.getRuntime"]], "getruntime() (pami.sequentialpatternmining.basic.prefixspan.prefixspan method)": [[93, "PAMI.sequentialPatternMining.basic.prefixSpan.prefixSpan.getRuntime"]], "getsameseq() (pami.sequentialpatternmining.basic.prefixspan.prefixspan method)": [[93, "PAMI.sequentialPatternMining.basic.prefixSpan.prefixSpan.getSameSeq"]], "make1lendatabase() (pami.sequentialpatternmining.basic.spade.spade method)": [[93, "PAMI.sequentialPatternMining.basic.SPADE.SPADE.make1LenDatabase"]], "make2bitdatabase() (pami.sequentialpatternmining.basic.spam.spam method)": [[93, "PAMI.sequentialPatternMining.basic.SPAM.SPAM.make2BitDatabase"]], "make2lendatabase() (pami.sequentialpatternmining.basic.spade.spade method)": [[93, "PAMI.sequentialPatternMining.basic.SPADE.SPADE.make2LenDatabase"]], "make3lendatabase() (pami.sequentialpatternmining.basic.spade.spade method)": [[93, "PAMI.sequentialPatternMining.basic.SPADE.SPADE.make3LenDatabase"]], "makenext() (pami.sequentialpatternmining.basic.prefixspan.prefixspan method)": [[93, "PAMI.sequentialPatternMining.basic.prefixSpan.prefixSpan.makeNext"]], "makenextrow() (pami.sequentialpatternmining.basic.spade.spade method)": [[93, "PAMI.sequentialPatternMining.basic.SPADE.SPADE.makeNextRow"]], "makenextrowsame() (pami.sequentialpatternmining.basic.spade.spade method)": [[93, "PAMI.sequentialPatternMining.basic.SPADE.SPADE.makeNextRowSame"]], "makenextrowsame2() (pami.sequentialpatternmining.basic.spade.spade method)": [[93, "PAMI.sequentialPatternMining.basic.SPADE.SPADE.makeNextRowSame2"]], "makenextrowsame3() (pami.sequentialpatternmining.basic.spade.spade method)": [[93, "PAMI.sequentialPatternMining.basic.SPADE.SPADE.makeNextRowSame3"]], "makenextsame() (pami.sequentialpatternmining.basic.prefixspan.prefixspan method)": [[93, "PAMI.sequentialPatternMining.basic.prefixSpan.prefixSpan.makeNextSame"]], "makeseqdatabasefirst() (pami.sequentialpatternmining.basic.prefixspan.prefixspan method)": [[93, "PAMI.sequentialPatternMining.basic.prefixSpan.prefixSpan.makeSeqDatabaseFirst"]], "makeseqdatabasesame() (pami.sequentialpatternmining.basic.prefixspan.prefixspan method)": [[93, "PAMI.sequentialPatternMining.basic.prefixSpan.prefixSpan.makeSeqDatabaseSame"]], "makesupdatabase() (pami.sequentialpatternmining.basic.prefixspan.prefixspan method)": [[93, "PAMI.sequentialPatternMining.basic.prefixSpan.prefixSpan.makeSupDatabase"]], "makexlendatabase() (pami.sequentialpatternmining.basic.spade.spade method)": [[93, "PAMI.sequentialPatternMining.basic.SPADE.SPADE.makexLenDatabase"]], "makexlendatabasesame() (pami.sequentialpatternmining.basic.spade.spade method)": [[93, 
"PAMI.sequentialPatternMining.basic.SPADE.SPADE.makexLenDatabaseSame"]], "prefixspan (class in pami.sequentialpatternmining.basic.prefixspan)": [[93, "PAMI.sequentialPatternMining.basic.prefixSpan.prefixSpan"]], "printresults() (pami.sequentialpatternmining.basic.spade.spade method)": [[93, "PAMI.sequentialPatternMining.basic.SPADE.SPADE.printResults"]], "printresults() (pami.sequentialpatternmining.basic.spam.spam method)": [[93, "PAMI.sequentialPatternMining.basic.SPAM.SPAM.printResults"]], "printresults() (pami.sequentialpatternmining.basic.prefixspan.prefixspan method)": [[93, "PAMI.sequentialPatternMining.basic.prefixSpan.prefixSpan.printResults"]], "save() (pami.sequentialpatternmining.basic.spade.spade method)": [[93, "PAMI.sequentialPatternMining.basic.SPADE.SPADE.save"]], "save() (pami.sequentialpatternmining.basic.spam.spam method)": [[93, "PAMI.sequentialPatternMining.basic.SPAM.SPAM.save"]], "save() (pami.sequentialpatternmining.basic.prefixspan.prefixspan method)": [[93, "PAMI.sequentialPatternMining.basic.prefixSpan.prefixSpan.save"]], "serchsame() (pami.sequentialpatternmining.basic.prefixspan.prefixspan method)": [[93, "PAMI.sequentialPatternMining.basic.prefixSpan.prefixSpan.serchSame"]], "startmine() (pami.sequentialpatternmining.basic.spade.spade method)": [[93, "PAMI.sequentialPatternMining.basic.SPADE.SPADE.startMine"]], "startmine() (pami.sequentialpatternmining.basic.spam.spam method)": [[93, "PAMI.sequentialPatternMining.basic.SPAM.SPAM.startMine"]], "startmine() (pami.sequentialpatternmining.basic.prefixspan.prefixspan method)": [[93, "PAMI.sequentialPatternMining.basic.prefixSpan.prefixSpan.startMine"]], "pami.sequentialpatternmining.closed": [[94, "module-PAMI.sequentialPatternMining.closed"]], "pami.sequentialpatternmining.closed.abstract": [[94, "module-PAMI.sequentialPatternMining.closed.abstract"]], "pami.sequentialpatternmining.closed.bide": [[94, "module-PAMI.sequentialPatternMining.closed.bide"]], "pami.stableperiodicfrequentpattern": [[95, "module-PAMI.stablePeriodicFrequentPattern"]], "pami.stableperiodicfrequentpattern.basic": [[96, "module-PAMI.stablePeriodicFrequentPattern.basic"]], "pami.stableperiodicfrequentpattern.basic.sppeclat": [[96, "module-PAMI.stablePeriodicFrequentPattern.basic.SPPEclat"]], "pami.stableperiodicfrequentpattern.basic.abstract": [[96, "module-PAMI.stablePeriodicFrequentPattern.basic.abstract"]], "sppeclat (class in pami.stableperiodicfrequentpattern.basic.sppeclat)": [[96, "PAMI.stablePeriodicFrequentPattern.basic.SPPEclat.SPPEclat"]], "getmemoryrss() (pami.stableperiodicfrequentpattern.basic.sppeclat.sppeclat method)": [[96, "PAMI.stablePeriodicFrequentPattern.basic.SPPEclat.SPPEclat.getMemoryRSS"]], "getmemoryuss() (pami.stableperiodicfrequentpattern.basic.sppeclat.sppeclat method)": [[96, "PAMI.stablePeriodicFrequentPattern.basic.SPPEclat.SPPEclat.getMemoryUSS"]], "getpatterns() (pami.stableperiodicfrequentpattern.basic.sppeclat.sppeclat method)": [[96, "PAMI.stablePeriodicFrequentPattern.basic.SPPEclat.SPPEclat.getPatterns"]], "getpatternsasdataframe() (pami.stableperiodicfrequentpattern.basic.sppeclat.sppeclat method)": [[96, "PAMI.stablePeriodicFrequentPattern.basic.SPPEclat.SPPEclat.getPatternsAsDataFrame"]], "getruntime() (pami.stableperiodicfrequentpattern.basic.sppeclat.sppeclat method)": [[96, "PAMI.stablePeriodicFrequentPattern.basic.SPPEclat.SPPEclat.getRuntime"]], "mine() (pami.stableperiodicfrequentpattern.basic.sppeclat.sppeclat method)": [[96, 
"PAMI.stablePeriodicFrequentPattern.basic.SPPEclat.SPPEclat.mine"]], "printresults() (pami.stableperiodicfrequentpattern.basic.sppeclat.sppeclat method)": [[96, "PAMI.stablePeriodicFrequentPattern.basic.SPPEclat.SPPEclat.printResults"]], "save() (pami.stableperiodicfrequentpattern.basic.sppeclat.sppeclat method)": [[96, "PAMI.stablePeriodicFrequentPattern.basic.SPPEclat.SPPEclat.save"]], "startmine() (pami.stableperiodicfrequentpattern.basic.sppeclat.sppeclat method)": [[96, "PAMI.stablePeriodicFrequentPattern.basic.SPPEclat.SPPEclat.startMine"]], "pami.stableperiodicfrequentpattern.topk": [[97, "module-PAMI.stablePeriodicFrequentPattern.topK"]], "pami.stableperiodicfrequentpattern.topk.tspin": [[97, "module-PAMI.stablePeriodicFrequentPattern.topK.TSPIN"]], "pami.stableperiodicfrequentpattern.topk.abstract": [[97, "module-PAMI.stablePeriodicFrequentPattern.topK.abstract"]], "tspin (class in pami.stableperiodicfrequentpattern.topk.tspin)": [[97, "PAMI.stablePeriodicFrequentPattern.topK.TSPIN.TSPIN"]], "getmemoryrss() (pami.stableperiodicfrequentpattern.topk.tspin.tspin method)": [[97, "PAMI.stablePeriodicFrequentPattern.topK.TSPIN.TSPIN.getMemoryRSS"]], "getmemoryuss() (pami.stableperiodicfrequentpattern.topk.tspin.tspin method)": [[97, "PAMI.stablePeriodicFrequentPattern.topK.TSPIN.TSPIN.getMemoryUSS"]], "getpatterns() (pami.stableperiodicfrequentpattern.topk.tspin.tspin method)": [[97, "PAMI.stablePeriodicFrequentPattern.topK.TSPIN.TSPIN.getPatterns"]], "getpatternsasdataframe() (pami.stableperiodicfrequentpattern.topk.tspin.tspin method)": [[97, "PAMI.stablePeriodicFrequentPattern.topK.TSPIN.TSPIN.getPatternsAsDataFrame"]], "getruntime() (pami.stableperiodicfrequentpattern.topk.tspin.tspin method)": [[97, "PAMI.stablePeriodicFrequentPattern.topK.TSPIN.TSPIN.getRuntime"]], "printresults() (pami.stableperiodicfrequentpattern.topk.tspin.tspin method)": [[97, "PAMI.stablePeriodicFrequentPattern.topK.TSPIN.TSPIN.printResults"]], "save() (pami.stableperiodicfrequentpattern.topk.tspin.tspin method)": [[97, "PAMI.stablePeriodicFrequentPattern.topK.TSPIN.TSPIN.save"]], "startmine() (pami.stableperiodicfrequentpattern.topk.tspin.tspin method)": [[97, "PAMI.stablePeriodicFrequentPattern.topK.TSPIN.TSPIN.startMine"]], "pami.subgraphmining": [[98, "module-PAMI.subgraphMining"]], "dfscode (class in pami.subgraphmining.basic.dfscode)": [[99, "PAMI.subgraphMining.basic.dfsCode.DFSCode"]], "edge (class in pami.subgraphmining.basic.edge)": [[99, "PAMI.subgraphMining.basic.edge.Edge"]], "extendededge (class in pami.subgraphmining.basic.extendededge)": [[99, "PAMI.subgraphMining.basic.extendedEdge.ExtendedEdge"]], "frequentsubgraph (class in pami.subgraphmining.basic.frequentsubgraph)": [[99, "PAMI.subgraphMining.basic.frequentSubgraph.FrequentSubgraph"]], "gspan (class in pami.subgraphmining.basic.gspan)": [[99, "PAMI.subgraphMining.basic.gspan.GSpan"]], "gspan.pair (class in pami.subgraphmining.basic.gspan)": [[99, "PAMI.subgraphMining.basic.gspan.GSpan.Pair"]], "graph (class in pami.subgraphmining.basic.graph)": [[99, "PAMI.subgraphMining.basic.graph.Graph"]], "pami.subgraphmining.basic": [[99, "module-PAMI.subgraphMining.basic"]], "pami.subgraphmining.basic.abstract": [[99, "module-PAMI.subgraphMining.basic.abstract"]], "pami.subgraphmining.basic.dfscode": [[99, "module-PAMI.subgraphMining.basic.dfsCode"]], "pami.subgraphmining.basic.edge": [[99, "module-PAMI.subgraphMining.basic.edge"]], "pami.subgraphmining.basic.extendededge": [[99, "module-PAMI.subgraphMining.basic.extendedEdge"]], 
"pami.subgraphmining.basic.frequentsubgraph": [[99, "module-PAMI.subgraphMining.basic.frequentSubgraph"]], "pami.subgraphmining.basic.graph": [[99, "module-PAMI.subgraphMining.basic.graph"]], "pami.subgraphmining.basic.gspan": [[99, "module-PAMI.subgraphMining.basic.gspan"]], "pami.subgraphmining.basic.sparsetriangularmatrix": [[99, "module-PAMI.subgraphMining.basic.sparseTriangularMatrix"]], "pami.subgraphmining.basic.vertex": [[99, "module-PAMI.subgraphMining.basic.vertex"]], "sparsetriangularmatrix (class in pami.subgraphmining.basic.sparsetriangularmatrix)": [[99, "PAMI.subgraphMining.basic.sparseTriangularMatrix.SparseTriangularMatrix"]], "vertex (class in pami.subgraphmining.basic.vertex)": [[99, "PAMI.subgraphMining.basic.vertex.Vertex"]], "add() (pami.subgraphmining.basic.dfscode.dfscode method)": [[99, "PAMI.subgraphMining.basic.dfsCode.DFSCode.add"]], "addedge() (pami.subgraphmining.basic.vertex.vertex method)": [[99, "PAMI.subgraphMining.basic.vertex.Vertex.addEdge"]], "another() (pami.subgraphmining.basic.edge.edge method)": [[99, "PAMI.subgraphMining.basic.edge.Edge.another"]], "containedge() (pami.subgraphmining.basic.dfscode.dfscode method)": [[99, "PAMI.subgraphMining.basic.dfsCode.DFSCode.containEdge"]], "copy() (pami.subgraphmining.basic.dfscode.dfscode method)": [[99, "PAMI.subgraphMining.basic.dfsCode.DFSCode.copy"]], "edge_count_pruning (pami.subgraphmining.basic.gspan.gspan attribute)": [[99, "PAMI.subgraphMining.basic.gspan.GSpan.edge_count_pruning"]], "eliminate_infrequent_edge_labels (pami.subgraphmining.basic.gspan.gspan attribute)": [[99, "PAMI.subgraphMining.basic.gspan.GSpan.eliminate_infrequent_edge_labels"]], "eliminate_infrequent_vertex_pairs (pami.subgraphmining.basic.gspan.gspan attribute)": [[99, "PAMI.subgraphMining.basic.gspan.GSpan.eliminate_infrequent_vertex_pairs"]], "eliminate_infrequent_vertices (pami.subgraphmining.basic.gspan.gspan attribute)": [[99, "PAMI.subgraphMining.basic.gspan.GSpan.eliminate_infrequent_vertices"]], "emptyintegerarray (pami.subgraphmining.basic.graph.graph attribute)": [[99, "PAMI.subgraphMining.basic.graph.Graph.emptyIntegerArray"]], "emptyvertexlist (pami.subgraphmining.basic.graph.graph attribute)": [[99, "PAMI.subgraphMining.basic.graph.Graph.emptyVertexList"]], "findallonlyonevertex() (pami.subgraphmining.basic.gspan.gspan method)": [[99, "PAMI.subgraphMining.basic.gspan.GSpan.findAllOnlyOneVertex"]], "findallwithlabel() (pami.subgraphmining.basic.graph.graph method)": [[99, "PAMI.subgraphMining.basic.graph.Graph.findAllWithLabel"]], "gspan() (pami.subgraphmining.basic.gspan.gspan method)": [[99, "PAMI.subgraphMining.basic.gspan.GSpan.gSpan"]], "getallneighbors() (pami.subgraphmining.basic.graph.graph method)": [[99, "PAMI.subgraphMining.basic.graph.Graph.getAllNeighbors"]], "getallvlabels() (pami.subgraphmining.basic.dfscode.dfscode method)": [[99, "PAMI.subgraphMining.basic.dfsCode.DFSCode.getAllVLabels"]], "getallvertices() (pami.subgraphmining.basic.graph.graph method)": [[99, "PAMI.subgraphMining.basic.graph.Graph.getAllVertices"]], "getat() (pami.subgraphmining.basic.dfscode.dfscode method)": [[99, "PAMI.subgraphMining.basic.dfsCode.DFSCode.getAt"]], "getedge() (pami.subgraphmining.basic.graph.graph method)": [[99, "PAMI.subgraphMining.basic.graph.Graph.getEdge"]], "getedgecount() (pami.subgraphmining.basic.graph.graph method)": [[99, "PAMI.subgraphMining.basic.graph.Graph.getEdgeCount"]], "getedgelabel() (pami.subgraphmining.basic.edge.edge method)": [[99, "PAMI.subgraphMining.basic.edge.Edge.getEdgeLabel"]], 
"getedgelabel() (pami.subgraphmining.basic.extendededge.extendededge method)": [[99, "PAMI.subgraphMining.basic.extendedEdge.ExtendedEdge.getEdgeLabel"]], "getedgelabel() (pami.subgraphmining.basic.graph.graph method)": [[99, "PAMI.subgraphMining.basic.graph.Graph.getEdgeLabel"]], "getedgelist() (pami.subgraphmining.basic.vertex.vertex method)": [[99, "PAMI.subgraphMining.basic.vertex.Vertex.getEdgeList"]], "geteelist() (pami.subgraphmining.basic.dfscode.dfscode method)": [[99, "PAMI.subgraphMining.basic.dfsCode.DFSCode.getEeList"]], "getfrequentsubgraphs() (pami.subgraphmining.basic.gspan.gspan method)": [[99, "PAMI.subgraphMining.basic.gspan.GSpan.getFrequentSubgraphs"]], "getid() (pami.subgraphmining.basic.graph.graph method)": [[99, "PAMI.subgraphMining.basic.graph.Graph.getId"]], "getid() (pami.subgraphmining.basic.vertex.vertex method)": [[99, "PAMI.subgraphMining.basic.vertex.Vertex.getId"]], "getlabel() (pami.subgraphmining.basic.vertex.vertex method)": [[99, "PAMI.subgraphMining.basic.vertex.Vertex.getLabel"]], "getmemoryrss() (pami.subgraphmining.basic.gspan.gspan method)": [[99, "PAMI.subgraphMining.basic.gspan.GSpan.getMemoryRSS"]], "getmemoryuss() (pami.subgraphmining.basic.gspan.gspan method)": [[99, "PAMI.subgraphMining.basic.gspan.GSpan.getMemoryUSS"]], "getnonprecalculatedallvertices() (pami.subgraphmining.basic.graph.graph method)": [[99, "PAMI.subgraphMining.basic.graph.Graph.getNonPrecalculatedAllVertices"]], "getrightmost() (pami.subgraphmining.basic.dfscode.dfscode method)": [[99, "PAMI.subgraphMining.basic.dfsCode.DFSCode.getRightMost"]], "getrightmostpath() (pami.subgraphmining.basic.dfscode.dfscode method)": [[99, "PAMI.subgraphMining.basic.dfsCode.DFSCode.getRightMostPath"]], "getruntime() (pami.subgraphmining.basic.gspan.gspan method)": [[99, "PAMI.subgraphMining.basic.gspan.GSpan.getRuntime"]], "getsupportforitems() (pami.subgraphmining.basic.sparsetriangularmatrix.sparsetriangularmatrix method)": [[99, "PAMI.subgraphMining.basic.sparseTriangularMatrix.SparseTriangularMatrix.getSupportForItems"]], "getv1() (pami.subgraphmining.basic.extendededge.extendededge method)": [[99, "PAMI.subgraphMining.basic.extendedEdge.ExtendedEdge.getV1"]], "getv2() (pami.subgraphmining.basic.extendededge.extendededge method)": [[99, "PAMI.subgraphMining.basic.extendedEdge.ExtendedEdge.getV2"]], "getvlabel() (pami.subgraphmining.basic.graph.graph method)": [[99, "PAMI.subgraphMining.basic.graph.Graph.getVLabel"]], "getvlabel1() (pami.subgraphmining.basic.extendededge.extendededge method)": [[99, "PAMI.subgraphMining.basic.extendedEdge.ExtendedEdge.getVLabel1"]], "getvlabel2() (pami.subgraphmining.basic.extendededge.extendededge method)": [[99, "PAMI.subgraphMining.basic.extendedEdge.ExtendedEdge.getVLabel2"]], "gspandfs() (pami.subgraphmining.basic.gspan.gspan method)": [[99, "PAMI.subgraphMining.basic.gspan.GSpan.gspanDFS"]], "incrementcount() (pami.subgraphmining.basic.sparsetriangularmatrix.sparsetriangularmatrix method)": [[99, "PAMI.subgraphMining.basic.sparseTriangularMatrix.SparseTriangularMatrix.incrementCount"]], "iscanonical() (pami.subgraphmining.basic.gspan.gspan method)": [[99, "PAMI.subgraphMining.basic.gspan.GSpan.isCanonical"]], "isempty() (pami.subgraphmining.basic.dfscode.dfscode method)": [[99, "PAMI.subgraphMining.basic.dfsCode.DFSCode.isEmpty"]], "isneighboring() (pami.subgraphmining.basic.graph.graph method)": [[99, "PAMI.subgraphMining.basic.graph.Graph.isNeighboring"]], "notpreofrm() (pami.subgraphmining.basic.dfscode.dfscode method)": [[99, 
"PAMI.subgraphMining.basic.dfsCode.DFSCode.notPreOfRm"]], "onrightmostpath() (pami.subgraphmining.basic.dfscode.dfscode method)": [[99, "PAMI.subgraphMining.basic.dfsCode.DFSCode.onRightMostPath"]], "pairsmallerthan() (pami.subgraphmining.basic.extendededge.extendededge method)": [[99, "PAMI.subgraphMining.basic.extendedEdge.ExtendedEdge.pairSmallerThan"]], "precalculatelabelstovertices() (pami.subgraphmining.basic.graph.graph method)": [[99, "PAMI.subgraphMining.basic.graph.Graph.precalculateLabelsToVertices"]], "precalculatevertexlist() (pami.subgraphmining.basic.graph.graph method)": [[99, "PAMI.subgraphMining.basic.graph.Graph.precalculateVertexList"]], "precalculatevertexneighbors() (pami.subgraphmining.basic.graph.graph method)": [[99, "PAMI.subgraphMining.basic.graph.Graph.precalculateVertexNeighbors"]], "readgraphs() (pami.subgraphmining.basic.gspan.gspan method)": [[99, "PAMI.subgraphMining.basic.gspan.GSpan.readGraphs"]], "removeedge() (pami.subgraphmining.basic.vertex.vertex method)": [[99, "PAMI.subgraphMining.basic.vertex.Vertex.removeEdge"]], "removeinfrequententriesfrommatrix() (pami.subgraphmining.basic.sparsetriangularmatrix.sparsetriangularmatrix method)": [[99, "PAMI.subgraphMining.basic.sparseTriangularMatrix.SparseTriangularMatrix.removeInfrequentEntriesFromMatrix"]], "removeinfrequentlabel() (pami.subgraphmining.basic.graph.graph method)": [[99, "PAMI.subgraphMining.basic.graph.Graph.removeInfrequentLabel"]], "removeinfrequentvertexpairs() (pami.subgraphmining.basic.gspan.gspan method)": [[99, "PAMI.subgraphMining.basic.gspan.GSpan.removeInfrequentVertexPairs"]], "rightmostpathextensions() (pami.subgraphmining.basic.gspan.gspan method)": [[99, "PAMI.subgraphMining.basic.gspan.GSpan.rightMostPathExtensions"]], "rightmostpathextensionsfromsingle() (pami.subgraphmining.basic.gspan.gspan method)": [[99, "PAMI.subgraphMining.basic.gspan.GSpan.rightMostPathExtensionsFromSingle"]], "save() (pami.subgraphmining.basic.gspan.gspan method)": [[99, "PAMI.subgraphMining.basic.gspan.GSpan.save"]], "setsupport() (pami.subgraphmining.basic.sparsetriangularmatrix.sparsetriangularmatrix method)": [[99, "PAMI.subgraphMining.basic.sparseTriangularMatrix.SparseTriangularMatrix.setSupport"]], "smallerthan() (pami.subgraphmining.basic.extendededge.extendededge method)": [[99, "PAMI.subgraphMining.basic.extendedEdge.ExtendedEdge.smallerThan"]], "smallerthanoriginal() (pami.subgraphmining.basic.extendededge.extendededge method)": [[99, "PAMI.subgraphMining.basic.extendedEdge.ExtendedEdge.smallerThanOriginal"]], "startmine() (pami.subgraphmining.basic.gspan.gspan method)": [[99, "PAMI.subgraphMining.basic.gspan.GSpan.startMine"]], "subgraphisomorphisms() (pami.subgraphmining.basic.gspan.gspan method)": [[99, "PAMI.subgraphMining.basic.gspan.GSpan.subgraphIsomorphisms"]], "dynamic_search (pami.subgraphmining.topk.tkg.tkg attribute)": [[100, "PAMI.subgraphMining.topK.tkg.TKG.DYNAMIC_SEARCH"]], "dfscode (class in pami.subgraphmining.topk.dfscode)": [[100, "PAMI.subgraphMining.topK.DFSCode.DfsCode"]], "dfsthread (class in pami.subgraphmining.topk.dfsthread)": [[100, "PAMI.subgraphMining.topK.DFSThread.DfsThread"]], "edge_count_pruning (pami.subgraphmining.topk.tkg.tkg attribute)": [[100, "PAMI.subgraphMining.topK.tkg.TKG.EDGE_COUNT_PRUNING"]], "eliminate_infrequent_edge_labels (pami.subgraphmining.topk.tkg.tkg attribute)": [[100, "PAMI.subgraphMining.topK.tkg.TKG.ELIMINATE_INFREQUENT_EDGE_LABELS"]], "eliminate_infrequent_vertex_pairs (pami.subgraphmining.topk.tkg.tkg attribute)": [[100, 
"PAMI.subgraphMining.topK.tkg.TKG.ELIMINATE_INFREQUENT_VERTEX_PAIRS"]], "eliminate_infrequent_vertices (pami.subgraphmining.topk.tkg.tkg attribute)": [[100, "PAMI.subgraphMining.topK.tkg.TKG.ELIMINATE_INFREQUENT_VERTICES"]], "empty_integer_array (pami.subgraphmining.topk.graph.graph attribute)": [[100, "PAMI.subgraphMining.topK.graph.Graph.EMPTY_INTEGER_ARRAY"]], "empty_vertex_list (pami.subgraphmining.topk.graph.graph attribute)": [[100, "PAMI.subgraphMining.topK.graph.Graph.EMPTY_VERTEX_LIST"]], "edge (class in pami.subgraphmining.topk.edge)": [[100, "PAMI.subgraphMining.topK.edge.Edge"]], "extendededge (class in pami.subgraphmining.topk.extendededge)": [[100, "PAMI.subgraphMining.topK.extendedEdge.ExtendedEdge"]], "frequentsubgraph (class in pami.subgraphmining.topk.frequentsubgraph)": [[100, "PAMI.subgraphMining.topK.frequentSubgraph.FrequentSubgraph"]], "graph (class in pami.subgraphmining.topk.graph)": [[100, "PAMI.subgraphMining.topK.graph.Graph"]], "pami.subgraphmining.topk": [[100, "module-PAMI.subgraphMining.topK"]], "pami.subgraphmining.topk.dfscode": [[100, "module-PAMI.subgraphMining.topK.DFSCode"]], "pami.subgraphmining.topk.dfsthread": [[100, "module-PAMI.subgraphMining.topK.DFSThread"]], "pami.subgraphmining.topk.abstract": [[100, "module-PAMI.subgraphMining.topK.abstract"]], "pami.subgraphmining.topk.edge": [[100, "module-PAMI.subgraphMining.topK.edge"]], "pami.subgraphmining.topk.extendededge": [[100, "module-PAMI.subgraphMining.topK.extendedEdge"]], "pami.subgraphmining.topk.frequentsubgraph": [[100, "module-PAMI.subgraphMining.topK.frequentSubgraph"]], "pami.subgraphmining.topk.graph": [[100, "module-PAMI.subgraphMining.topK.graph"]], "pami.subgraphmining.topk.sparsetriangularmatrix": [[100, "module-PAMI.subgraphMining.topK.sparseTriangularMatrix"]], "pami.subgraphmining.topk.tkg": [[100, "module-PAMI.subgraphMining.topK.tkg"]], "pami.subgraphmining.topk.vertex": [[100, "module-PAMI.subgraphMining.topK.vertex"]], "sparsetriangularmatrix (class in pami.subgraphmining.topk.sparsetriangularmatrix)": [[100, "PAMI.subgraphMining.topK.sparseTriangularMatrix.SparseTriangularMatrix"]], "threaded_dynamic_search (pami.subgraphmining.topk.tkg.tkg attribute)": [[100, "PAMI.subgraphMining.topK.tkg.TKG.THREADED_DYNAMIC_SEARCH"]], "tkg (class in pami.subgraphmining.topk.tkg)": [[100, "PAMI.subgraphMining.topK.tkg.TKG"]], "tkg.pair (class in pami.subgraphmining.topk.tkg)": [[100, "PAMI.subgraphMining.topK.tkg.TKG.Pair"]], "vertex (class in pami.subgraphmining.topk.vertex)": [[100, "PAMI.subgraphMining.topK.vertex.Vertex"]], "add() (pami.subgraphmining.topk.dfscode.dfscode method)": [[100, "PAMI.subgraphMining.topK.DFSCode.DfsCode.add"]], "addedge() (pami.subgraphmining.topk.vertex.vertex method)": [[100, "PAMI.subgraphMining.topK.vertex.Vertex.addEdge"]], "another() (pami.subgraphmining.topk.edge.edge method)": [[100, "PAMI.subgraphMining.topK.edge.Edge.another"]], "containedge() (pami.subgraphmining.topk.dfscode.dfscode method)": [[100, "PAMI.subgraphMining.topK.DFSCode.DfsCode.containEdge"]], "copy() (pami.subgraphmining.topk.dfscode.dfscode method)": [[100, "PAMI.subgraphMining.topK.DFSCode.DfsCode.copy"]], "findallonlyonevertex() (pami.subgraphmining.topk.tkg.tkg method)": [[100, "PAMI.subgraphMining.topK.tkg.TKG.findAllOnlyOneVertex"]], "findallwithlabel() (pami.subgraphmining.topk.graph.graph method)": [[100, "PAMI.subgraphMining.topK.graph.Graph.findAllWithLabel"]], "gspan() (pami.subgraphmining.topk.tkg.tkg method)": [[100, "PAMI.subgraphMining.topK.tkg.TKG.gSpan"]], 
"getallneighbors() (pami.subgraphmining.topk.graph.graph method)": [[100, "PAMI.subgraphMining.topK.graph.Graph.getAllNeighbors"]], "getallvlabels() (pami.subgraphmining.topk.dfscode.dfscode method)": [[100, "PAMI.subgraphMining.topK.DFSCode.DfsCode.getAllVLabels"]], "getallvertices() (pami.subgraphmining.topk.graph.graph method)": [[100, "PAMI.subgraphMining.topK.graph.Graph.getAllVertices"]], "getat() (pami.subgraphmining.topk.dfscode.dfscode method)": [[100, "PAMI.subgraphMining.topK.DFSCode.DfsCode.getAt"]], "getedge() (pami.subgraphmining.topk.graph.graph method)": [[100, "PAMI.subgraphMining.topK.graph.Graph.getEdge"]], "getedgecount() (pami.subgraphmining.topk.graph.graph method)": [[100, "PAMI.subgraphMining.topK.graph.Graph.getEdgeCount"]], "getedgelabel() (pami.subgraphmining.topk.edge.edge method)": [[100, "PAMI.subgraphMining.topK.edge.Edge.getEdgeLabel"]], "getedgelabel() (pami.subgraphmining.topk.extendededge.extendededge method)": [[100, "PAMI.subgraphMining.topK.extendedEdge.ExtendedEdge.getEdgeLabel"]], "getedgelabel() (pami.subgraphmining.topk.graph.graph method)": [[100, "PAMI.subgraphMining.topK.graph.Graph.getEdgeLabel"]], "getedgelist() (pami.subgraphmining.topk.vertex.vertex method)": [[100, "PAMI.subgraphMining.topK.vertex.Vertex.getEdgeList"]], "geteelist() (pami.subgraphmining.topk.dfscode.dfscode method)": [[100, "PAMI.subgraphMining.topK.DFSCode.DfsCode.getEeList"]], "getid() (pami.subgraphmining.topk.graph.graph method)": [[100, "PAMI.subgraphMining.topK.graph.Graph.getId"]], "getid() (pami.subgraphmining.topk.vertex.vertex method)": [[100, "PAMI.subgraphMining.topK.vertex.Vertex.getId"]], "getksubgraphs() (pami.subgraphmining.topk.tkg.tkg method)": [[100, "PAMI.subgraphMining.topK.tkg.TKG.getKSubgraphs"]], "getlabel() (pami.subgraphmining.topk.vertex.vertex method)": [[100, "PAMI.subgraphMining.topK.vertex.Vertex.getLabel"]], "getmemoryrss() (pami.subgraphmining.topk.tkg.tkg method)": [[100, "PAMI.subgraphMining.topK.tkg.TKG.getMemoryRSS"]], "getmemoryuss() (pami.subgraphmining.topk.tkg.tkg method)": [[100, "PAMI.subgraphMining.topK.tkg.TKG.getMemoryUSS"]], "getminsupport() (pami.subgraphmining.topk.tkg.tkg method)": [[100, "PAMI.subgraphMining.topK.tkg.TKG.getMinSupport"]], "getnonprecalculatedallvertices() (pami.subgraphmining.topk.graph.graph method)": [[100, "PAMI.subgraphMining.topK.graph.Graph.getNonPrecalculatedAllVertices"]], "getqueuesize() (pami.subgraphmining.topk.tkg.tkg method)": [[100, "PAMI.subgraphMining.topK.tkg.TKG.getQueueSize"]], "getrightmost() (pami.subgraphmining.topk.dfscode.dfscode method)": [[100, "PAMI.subgraphMining.topK.DFSCode.DfsCode.getRightMost"]], "getrightmostpath() (pami.subgraphmining.topk.dfscode.dfscode method)": [[100, "PAMI.subgraphMining.topK.DFSCode.DfsCode.getRightMostPath"]], "getruntime() (pami.subgraphmining.topk.tkg.tkg method)": [[100, "PAMI.subgraphMining.topK.tkg.TKG.getRuntime"]], "getsubgraphs() (pami.subgraphmining.topk.tkg.tkg method)": [[100, "PAMI.subgraphMining.topK.tkg.TKG.getSubgraphs"]], "getsupportforitems() (pami.subgraphmining.topk.sparsetriangularmatrix.sparsetriangularmatrix method)": [[100, "PAMI.subgraphMining.topK.sparseTriangularMatrix.SparseTriangularMatrix.getSupportForItems"]], "getv1() (pami.subgraphmining.topk.extendededge.extendededge method)": [[100, "PAMI.subgraphMining.topK.extendedEdge.ExtendedEdge.getV1"]], "getv2() (pami.subgraphmining.topk.extendededge.extendededge method)": [[100, "PAMI.subgraphMining.topK.extendedEdge.ExtendedEdge.getV2"]], "getvlabel() 
(pami.subgraphmining.topk.graph.graph method)": [[100, "PAMI.subgraphMining.topK.graph.Graph.getVLabel"]], "getvlabel1() (pami.subgraphmining.topk.extendededge.extendededge method)": [[100, "PAMI.subgraphMining.topK.extendedEdge.ExtendedEdge.getVLabel1"]], "getvlabel2() (pami.subgraphmining.topk.extendededge.extendededge method)": [[100, "PAMI.subgraphMining.topK.extendedEdge.ExtendedEdge.getVLabel2"]], "gspandfs() (pami.subgraphmining.topk.tkg.tkg method)": [[100, "PAMI.subgraphMining.topK.tkg.TKG.gspanDfs"]], "gspandynamicdfs() (pami.subgraphmining.topk.tkg.tkg method)": [[100, "PAMI.subgraphMining.topK.tkg.TKG.gspanDynamicDFS"]], "incrementcount() (pami.subgraphmining.topk.sparsetriangularmatrix.sparsetriangularmatrix method)": [[100, "PAMI.subgraphMining.topK.sparseTriangularMatrix.SparseTriangularMatrix.incrementCount"]], "iscanonical() (pami.subgraphmining.topk.tkg.tkg method)": [[100, "PAMI.subgraphMining.topK.tkg.TKG.isCanonical"]], "isempty() (pami.subgraphmining.topk.dfscode.dfscode method)": [[100, "PAMI.subgraphMining.topK.DFSCode.DfsCode.isEmpty"]], "isneighboring() (pami.subgraphmining.topk.graph.graph method)": [[100, "PAMI.subgraphMining.topK.graph.Graph.isNeighboring"]], "notpreofrm() (pami.subgraphmining.topk.dfscode.dfscode method)": [[100, "PAMI.subgraphMining.topK.DFSCode.DfsCode.notPreOfRm"]], "onrightmostpath() (pami.subgraphmining.topk.dfscode.dfscode method)": [[100, "PAMI.subgraphMining.topK.DFSCode.DfsCode.onRightMostPath"]], "pairsmallerthan() (pami.subgraphmining.topk.extendededge.extendededge method)": [[100, "PAMI.subgraphMining.topK.extendedEdge.ExtendedEdge.pairSmallerThan"]], "precalculatelabelstovertices() (pami.subgraphmining.topk.graph.graph method)": [[100, "PAMI.subgraphMining.topK.graph.Graph.precalculateLabelsToVertices"]], "precalculatevertexlist() (pami.subgraphmining.topk.graph.graph method)": [[100, "PAMI.subgraphMining.topK.graph.Graph.precalculateVertexList"]], "precalculatevertexneighbors() (pami.subgraphmining.topk.graph.graph method)": [[100, "PAMI.subgraphMining.topK.graph.Graph.precalculateVertexNeighbors"]], "readgraphs() (pami.subgraphmining.topk.tkg.tkg method)": [[100, "PAMI.subgraphMining.topK.tkg.TKG.readGraphs"]], "registerascandidate() (pami.subgraphmining.topk.tkg.tkg method)": [[100, "PAMI.subgraphMining.topK.tkg.TKG.registerAsCandidate"]], "removeedge() (pami.subgraphmining.topk.vertex.vertex method)": [[100, "PAMI.subgraphMining.topK.vertex.Vertex.removeEdge"]], "removeinfrequententriesfrommatrix() (pami.subgraphmining.topk.sparsetriangularmatrix.sparsetriangularmatrix method)": [[100, "PAMI.subgraphMining.topK.sparseTriangularMatrix.SparseTriangularMatrix.removeInfrequentEntriesFromMatrix"]], "removeinfrequentlabel() (pami.subgraphmining.topk.graph.graph method)": [[100, "PAMI.subgraphMining.topK.graph.Graph.removeInfrequentLabel"]], "removeinfrequentvertexpairs() (pami.subgraphmining.topk.tkg.tkg method)": [[100, "PAMI.subgraphMining.topK.tkg.TKG.removeInfrequentVertexPairs"]], "rightmostpathextensions() (pami.subgraphmining.topk.tkg.tkg method)": [[100, "PAMI.subgraphMining.topK.tkg.TKG.rightMostPathExtensions"]], "rightmostpathextensionsfromsingle() (pami.subgraphmining.topk.tkg.tkg method)": [[100, "PAMI.subgraphMining.topK.tkg.TKG.rightMostPathExtensionsFromSingle"]], "run() (pami.subgraphmining.topk.dfsthread.dfsthread method)": [[100, "PAMI.subgraphMining.topK.DFSThread.DfsThread.run"]], "save() (pami.subgraphmining.topk.tkg.tkg method)": [[100, "PAMI.subgraphMining.topK.tkg.TKG.save"]], "savepattern() 
(pami.subgraphmining.topk.tkg.tkg method)": [[100, "PAMI.subgraphMining.topK.tkg.TKG.savePattern"]], "setsupport() (pami.subgraphmining.topk.sparsetriangularmatrix.sparsetriangularmatrix method)": [[100, "PAMI.subgraphMining.topK.sparseTriangularMatrix.SparseTriangularMatrix.setSupport"]], "smallerthan() (pami.subgraphmining.topk.extendededge.extendededge method)": [[100, "PAMI.subgraphMining.topK.extendedEdge.ExtendedEdge.smallerThan"]], "smallerthanoriginal() (pami.subgraphmining.topk.extendededge.extendededge method)": [[100, "PAMI.subgraphMining.topK.extendedEdge.ExtendedEdge.smallerThanOriginal"]], "startmine() (pami.subgraphmining.topk.tkg.tkg method)": [[100, "PAMI.subgraphMining.topK.tkg.TKG.startMine"]], "startthreads() (pami.subgraphmining.topk.tkg.tkg method)": [[100, "PAMI.subgraphMining.topK.tkg.TKG.startThreads"]], "subgraphisomorphisms() (pami.subgraphmining.topk.tkg.tkg method)": [[100, "PAMI.subgraphMining.topK.tkg.TKG.subgraphIsomorphisms"]], "pami.uncertainfaulttolerantfrequentpattern": [[101, "module-PAMI.uncertainFaultTolerantFrequentPattern"]], "pami.uncertainfaulttolerantfrequentpattern.vbftmine": [[101, "module-PAMI.uncertainFaultTolerantFrequentPattern.VBFTMine"]], "pami.uncertainfaulttolerantfrequentpattern.abstract": [[101, "module-PAMI.uncertainFaultTolerantFrequentPattern.abstract"]], "vbftmine (class in pami.uncertainfaulttolerantfrequentpattern.vbftmine)": [[101, "PAMI.uncertainFaultTolerantFrequentPattern.VBFTMine.VBFTMine"]], "getmemoryrss() (pami.uncertainfaulttolerantfrequentpattern.vbftmine.vbftmine method)": [[101, "PAMI.uncertainFaultTolerantFrequentPattern.VBFTMine.VBFTMine.getMemoryRSS"]], "getmemoryuss() (pami.uncertainfaulttolerantfrequentpattern.vbftmine.vbftmine method)": [[101, "PAMI.uncertainFaultTolerantFrequentPattern.VBFTMine.VBFTMine.getMemoryUSS"]], "getpatterns() (pami.uncertainfaulttolerantfrequentpattern.vbftmine.vbftmine method)": [[101, "PAMI.uncertainFaultTolerantFrequentPattern.VBFTMine.VBFTMine.getPatterns"]], "getpatternsasdataframe() (pami.uncertainfaulttolerantfrequentpattern.vbftmine.vbftmine method)": [[101, "PAMI.uncertainFaultTolerantFrequentPattern.VBFTMine.VBFTMine.getPatternsAsDataFrame"]], "getruntime() (pami.uncertainfaulttolerantfrequentpattern.vbftmine.vbftmine method)": [[101, "PAMI.uncertainFaultTolerantFrequentPattern.VBFTMine.VBFTMine.getRuntime"]], "mine() (pami.uncertainfaulttolerantfrequentpattern.vbftmine.vbftmine method)": [[101, "PAMI.uncertainFaultTolerantFrequentPattern.VBFTMine.VBFTMine.mine"]], "printresults() (pami.uncertainfaulttolerantfrequentpattern.vbftmine.vbftmine method)": [[101, "PAMI.uncertainFaultTolerantFrequentPattern.VBFTMine.VBFTMine.printResults"]], "save() (pami.uncertainfaulttolerantfrequentpattern.vbftmine.vbftmine method)": [[101, "PAMI.uncertainFaultTolerantFrequentPattern.VBFTMine.VBFTMine.save"]], "startmine() (pami.uncertainfaulttolerantfrequentpattern.vbftmine.vbftmine method)": [[101, "PAMI.uncertainFaultTolerantFrequentPattern.VBFTMine.VBFTMine.startMine"]], "pami.uncertainfrequentpattern": [[102, "module-PAMI.uncertainFrequentPattern"]], "cufptree (class in pami.uncertainfrequentpattern.basic.cufptree)": [[103, "PAMI.uncertainFrequentPattern.basic.CUFPTree.CUFPTree"]], "pami.uncertainfrequentpattern.basic": [[103, "module-PAMI.uncertainFrequentPattern.basic"]], "pami.uncertainfrequentpattern.basic.cufptree": [[103, "module-PAMI.uncertainFrequentPattern.basic.CUFPTree"]], "pami.uncertainfrequentpattern.basic.abstract": [[103, 
"module-PAMI.uncertainFrequentPattern.basic.abstract"]], "getmemoryrss() (pami.uncertainfrequentpattern.basic.cufptree.cufptree method)": [[103, "PAMI.uncertainFrequentPattern.basic.CUFPTree.CUFPTree.getMemoryRSS"]], "getmemoryuss() (pami.uncertainfrequentpattern.basic.cufptree.cufptree method)": [[103, "PAMI.uncertainFrequentPattern.basic.CUFPTree.CUFPTree.getMemoryUSS"]], "getpatterns() (pami.uncertainfrequentpattern.basic.cufptree.cufptree method)": [[103, "PAMI.uncertainFrequentPattern.basic.CUFPTree.CUFPTree.getPatterns"]], "getpatternsasdataframe() (pami.uncertainfrequentpattern.basic.cufptree.cufptree method)": [[103, "PAMI.uncertainFrequentPattern.basic.CUFPTree.CUFPTree.getPatternsAsDataFrame"]], "getruntime() (pami.uncertainfrequentpattern.basic.cufptree.cufptree method)": [[103, "PAMI.uncertainFrequentPattern.basic.CUFPTree.CUFPTree.getRuntime"]], "mine() (pami.uncertainfrequentpattern.basic.cufptree.cufptree method)": [[103, "PAMI.uncertainFrequentPattern.basic.CUFPTree.CUFPTree.mine"]], "printresults() (pami.uncertainfrequentpattern.basic.cufptree.cufptree method)": [[103, "PAMI.uncertainFrequentPattern.basic.CUFPTree.CUFPTree.printResults"]], "save() (pami.uncertainfrequentpattern.basic.cufptree.cufptree method)": [[103, "PAMI.uncertainFrequentPattern.basic.CUFPTree.CUFPTree.save"]], "startmine() (pami.uncertainfrequentpattern.basic.cufptree.cufptree method)": [[103, "PAMI.uncertainFrequentPattern.basic.CUFPTree.CUFPTree.startMine"]], "pami.uncertaingeoreferencedfrequentpattern": [[104, "module-PAMI.uncertainGeoreferencedFrequentPattern"]], "gfpgrowth (class in pami.uncertaingeoreferencedfrequentpattern.basic.gfpgrowth)": [[105, "PAMI.uncertainGeoreferencedFrequentPattern.basic.GFPGrowth.GFPGrowth"]], "pami.uncertaingeoreferencedfrequentpattern.basic": [[105, "module-PAMI.uncertainGeoreferencedFrequentPattern.basic"]], "pami.uncertaingeoreferencedfrequentpattern.basic.gfpgrowth": [[105, "module-PAMI.uncertainGeoreferencedFrequentPattern.basic.GFPGrowth"]], "pami.uncertaingeoreferencedfrequentpattern.basic.abstract": [[105, "module-PAMI.uncertainGeoreferencedFrequentPattern.basic.abstract"]], "getmemoryrss() (pami.uncertaingeoreferencedfrequentpattern.basic.gfpgrowth.gfpgrowth method)": [[105, "PAMI.uncertainGeoreferencedFrequentPattern.basic.GFPGrowth.GFPGrowth.getMemoryRSS"]], "getmemoryuss() (pami.uncertaingeoreferencedfrequentpattern.basic.gfpgrowth.gfpgrowth method)": [[105, "PAMI.uncertainGeoreferencedFrequentPattern.basic.GFPGrowth.GFPGrowth.getMemoryUSS"]], "getpatterns() (pami.uncertaingeoreferencedfrequentpattern.basic.gfpgrowth.gfpgrowth method)": [[105, "PAMI.uncertainGeoreferencedFrequentPattern.basic.GFPGrowth.GFPGrowth.getPatterns"]], "getpatternsasdataframe() (pami.uncertaingeoreferencedfrequentpattern.basic.gfpgrowth.gfpgrowth method)": [[105, "PAMI.uncertainGeoreferencedFrequentPattern.basic.GFPGrowth.GFPGrowth.getPatternsAsDataFrame"]], "getruntime() (pami.uncertaingeoreferencedfrequentpattern.basic.gfpgrowth.gfpgrowth method)": [[105, "PAMI.uncertainGeoreferencedFrequentPattern.basic.GFPGrowth.GFPGrowth.getRuntime"]], "mine() (pami.uncertaingeoreferencedfrequentpattern.basic.gfpgrowth.gfpgrowth method)": [[105, "PAMI.uncertainGeoreferencedFrequentPattern.basic.GFPGrowth.GFPGrowth.mine"]], "printresults() (pami.uncertaingeoreferencedfrequentpattern.basic.gfpgrowth.gfpgrowth method)": [[105, "PAMI.uncertainGeoreferencedFrequentPattern.basic.GFPGrowth.GFPGrowth.printResults"]], "save() (pami.uncertaingeoreferencedfrequentpattern.basic.gfpgrowth.gfpgrowth 
method)": [[105, "PAMI.uncertainGeoreferencedFrequentPattern.basic.GFPGrowth.GFPGrowth.save"]], "startmine() (pami.uncertaingeoreferencedfrequentpattern.basic.gfpgrowth.gfpgrowth method)": [[105, "PAMI.uncertainGeoreferencedFrequentPattern.basic.GFPGrowth.GFPGrowth.startMine"]], "pami.uncertainperiodicfrequentpattern": [[106, "module-PAMI.uncertainPeriodicFrequentPattern"]], "pami.uncertainperiodicfrequentpattern.basic": [[107, "module-PAMI.uncertainPeriodicFrequentPattern.basic"]], "pami.uncertainperiodicfrequentpattern.basic.upfpgrowth": [[107, "module-PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowth"]], "pami.uncertainperiodicfrequentpattern.basic.upfpgrowthplus": [[107, "module-PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowthPlus"]], "pami.uncertainperiodicfrequentpattern.basic.abstract": [[107, "module-PAMI.uncertainPeriodicFrequentPattern.basic.abstract"]], "upfpgrowth (class in pami.uncertainperiodicfrequentpattern.basic.upfpgrowth)": [[107, "PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowth.UPFPGrowth"]], "upfpgrowthplus (class in pami.uncertainperiodicfrequentpattern.basic.upfpgrowthplus)": [[107, "PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowthPlus.UPFPGrowthPlus"]], "getmemoryrss() (pami.uncertainperiodicfrequentpattern.basic.upfpgrowth.upfpgrowth method)": [[107, "PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowth.UPFPGrowth.getMemoryRSS"]], "getmemoryrss() (pami.uncertainperiodicfrequentpattern.basic.upfpgrowthplus.upfpgrowthplus method)": [[107, "PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowthPlus.UPFPGrowthPlus.getMemoryRSS"]], "getmemoryuss() (pami.uncertainperiodicfrequentpattern.basic.upfpgrowth.upfpgrowth method)": [[107, "PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowth.UPFPGrowth.getMemoryUSS"]], "getmemoryuss() (pami.uncertainperiodicfrequentpattern.basic.upfpgrowthplus.upfpgrowthplus method)": [[107, "PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowthPlus.UPFPGrowthPlus.getMemoryUSS"]], "getpatterns() (pami.uncertainperiodicfrequentpattern.basic.upfpgrowth.upfpgrowth method)": [[107, "PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowth.UPFPGrowth.getPatterns"]], "getpatterns() (pami.uncertainperiodicfrequentpattern.basic.upfpgrowthplus.upfpgrowthplus method)": [[107, "PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowthPlus.UPFPGrowthPlus.getPatterns"]], "getpatternsasdataframe() (pami.uncertainperiodicfrequentpattern.basic.upfpgrowth.upfpgrowth method)": [[107, "PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowth.UPFPGrowth.getPatternsAsDataFrame"]], "getpatternsasdataframe() (pami.uncertainperiodicfrequentpattern.basic.upfpgrowthplus.upfpgrowthplus method)": [[107, "PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowthPlus.UPFPGrowthPlus.getPatternsAsDataFrame"]], "getruntime() (pami.uncertainperiodicfrequentpattern.basic.upfpgrowth.upfpgrowth method)": [[107, "PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowth.UPFPGrowth.getRuntime"]], "getruntime() (pami.uncertainperiodicfrequentpattern.basic.upfpgrowthplus.upfpgrowthplus method)": [[107, "PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowthPlus.UPFPGrowthPlus.getRuntime"]], "mine() (pami.uncertainperiodicfrequentpattern.basic.upfpgrowth.upfpgrowth method)": [[107, "PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowth.UPFPGrowth.mine"]], "mine() (pami.uncertainperiodicfrequentpattern.basic.upfpgrowthplus.upfpgrowthplus method)": [[107, "PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowthPlus.UPFPGrowthPlus.mine"]], "printresults() 
(pami.uncertainperiodicfrequentpattern.basic.upfpgrowth.upfpgrowth method)": [[107, "PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowth.UPFPGrowth.printResults"]], "printresults() (pami.uncertainperiodicfrequentpattern.basic.upfpgrowthplus.upfpgrowthplus method)": [[107, "PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowthPlus.UPFPGrowthPlus.printResults"]], "printtree() (in module pami.uncertainperiodicfrequentpattern.basic.upfpgrowthplus)": [[107, "PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowthPlus.printTree"]], "save() (pami.uncertainperiodicfrequentpattern.basic.upfpgrowth.upfpgrowth method)": [[107, "PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowth.UPFPGrowth.save"]], "save() (pami.uncertainperiodicfrequentpattern.basic.upfpgrowthplus.upfpgrowthplus method)": [[107, "PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowthPlus.UPFPGrowthPlus.save"]], "startmine() (pami.uncertainperiodicfrequentpattern.basic.upfpgrowth.upfpgrowth method)": [[107, "PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowth.UPFPGrowth.startMine"]], "startmine() (pami.uncertainperiodicfrequentpattern.basic.upfpgrowthplus.upfpgrowthplus method)": [[107, "PAMI.uncertainPeriodicFrequentPattern.basic.UPFPGrowthPlus.UPFPGrowthPlus.startMine"]], "pami.weightedfrequentneighbourhoodpattern": [[108, "module-PAMI.weightedFrequentNeighbourhoodPattern"]], "pami.weightedfrequentneighbourhoodpattern.basic": [[109, "module-PAMI.weightedFrequentNeighbourhoodPattern.basic"]], "pami.weightedfrequentneighbourhoodpattern.basic.swfpgrowth": [[109, "module-PAMI.weightedFrequentNeighbourhoodPattern.basic.SWFPGrowth"]], "pami.weightedfrequentneighbourhoodpattern.basic.abstract": [[109, "module-PAMI.weightedFrequentNeighbourhoodPattern.basic.abstract"]], "swfpgrowth (class in pami.weightedfrequentneighbourhoodpattern.basic.swfpgrowth)": [[109, "PAMI.weightedFrequentNeighbourhoodPattern.basic.SWFPGrowth.SWFPGrowth"]], "getmemoryrss() (pami.weightedfrequentneighbourhoodpattern.basic.swfpgrowth.swfpgrowth method)": [[109, "PAMI.weightedFrequentNeighbourhoodPattern.basic.SWFPGrowth.SWFPGrowth.getMemoryRSS"]], "getmemoryuss() (pami.weightedfrequentneighbourhoodpattern.basic.swfpgrowth.swfpgrowth method)": [[109, "PAMI.weightedFrequentNeighbourhoodPattern.basic.SWFPGrowth.SWFPGrowth.getMemoryUSS"]], "getpatterns() (pami.weightedfrequentneighbourhoodpattern.basic.swfpgrowth.swfpgrowth method)": [[109, "PAMI.weightedFrequentNeighbourhoodPattern.basic.SWFPGrowth.SWFPGrowth.getPatterns"]], "getpatternsasdataframe() (pami.weightedfrequentneighbourhoodpattern.basic.swfpgrowth.swfpgrowth method)": [[109, "PAMI.weightedFrequentNeighbourhoodPattern.basic.SWFPGrowth.SWFPGrowth.getPatternsAsDataFrame"]], "getruntime() (pami.weightedfrequentneighbourhoodpattern.basic.swfpgrowth.swfpgrowth method)": [[109, "PAMI.weightedFrequentNeighbourhoodPattern.basic.SWFPGrowth.SWFPGrowth.getRuntime"]], "mine() (pami.weightedfrequentneighbourhoodpattern.basic.swfpgrowth.swfpgrowth method)": [[109, "PAMI.weightedFrequentNeighbourhoodPattern.basic.SWFPGrowth.SWFPGrowth.mine"]], "printresults() (pami.weightedfrequentneighbourhoodpattern.basic.swfpgrowth.swfpgrowth method)": [[109, "PAMI.weightedFrequentNeighbourhoodPattern.basic.SWFPGrowth.SWFPGrowth.printResults"]], "save() (pami.weightedfrequentneighbourhoodpattern.basic.swfpgrowth.swfpgrowth method)": [[109, "PAMI.weightedFrequentNeighbourhoodPattern.basic.SWFPGrowth.SWFPGrowth.save"]], "startmine() (pami.weightedfrequentneighbourhoodpattern.basic.swfpgrowth.swfpgrowth method)": [[109, 
"PAMI.weightedFrequentNeighbourhoodPattern.basic.SWFPGrowth.SWFPGrowth.startMine"]], "pami.weightedfrequentpattern": [[110, "module-PAMI.weightedFrequentPattern"]], "pami.weightedfrequentpattern.basic": [[111, "module-PAMI.weightedFrequentPattern.basic"]], "pami.weightedfrequentpattern.basic.wfim": [[111, "module-PAMI.weightedFrequentPattern.basic.WFIM"]], "pami.weightedfrequentpattern.basic.abstract": [[111, "module-PAMI.weightedFrequentPattern.basic.abstract"]], "wfim (class in pami.weightedfrequentpattern.basic.wfim)": [[111, "PAMI.weightedFrequentPattern.basic.WFIM.WFIM"]], "getmemoryrss() (pami.weightedfrequentpattern.basic.wfim.wfim method)": [[111, "PAMI.weightedFrequentPattern.basic.WFIM.WFIM.getMemoryRSS"]], "getmemoryuss() (pami.weightedfrequentpattern.basic.wfim.wfim method)": [[111, "PAMI.weightedFrequentPattern.basic.WFIM.WFIM.getMemoryUSS"]], "getpatterns() (pami.weightedfrequentpattern.basic.wfim.wfim method)": [[111, "PAMI.weightedFrequentPattern.basic.WFIM.WFIM.getPatterns"]], "getpatternsasdataframe() (pami.weightedfrequentpattern.basic.wfim.wfim method)": [[111, "PAMI.weightedFrequentPattern.basic.WFIM.WFIM.getPatternsAsDataFrame"]], "getruntime() (pami.weightedfrequentpattern.basic.wfim.wfim method)": [[111, "PAMI.weightedFrequentPattern.basic.WFIM.WFIM.getRuntime"]], "mine() (pami.weightedfrequentpattern.basic.wfim.wfim method)": [[111, "PAMI.weightedFrequentPattern.basic.WFIM.WFIM.mine"]], "printresults() (pami.weightedfrequentpattern.basic.wfim.wfim method)": [[111, "PAMI.weightedFrequentPattern.basic.WFIM.WFIM.printResults"]], "save() (pami.weightedfrequentpattern.basic.wfim.wfim method)": [[111, "PAMI.weightedFrequentPattern.basic.WFIM.WFIM.save"]], "startmine() (pami.weightedfrequentpattern.basic.wfim.wfim method)": [[111, "PAMI.weightedFrequentPattern.basic.WFIM.WFIM.startMine"]], "pami.weightedfrequentregularpattern": [[112, "module-PAMI.weightedFrequentRegularPattern"]], "pami.weightedfrequentregularpattern.basic": [[113, "module-PAMI.weightedFrequentRegularPattern.basic"]], "pami.weightedfrequentregularpattern.basic.wfriminer": [[113, "module-PAMI.weightedFrequentRegularPattern.basic.WFRIMiner"]], "pami.weightedfrequentregularpattern.basic.abstract": [[113, "module-PAMI.weightedFrequentRegularPattern.basic.abstract"]], "wfriminer (class in pami.weightedfrequentregularpattern.basic.wfriminer)": [[113, "PAMI.weightedFrequentRegularPattern.basic.WFRIMiner.WFRIMiner"]], "getmemoryrss() (pami.weightedfrequentregularpattern.basic.wfriminer.wfriminer method)": [[113, "PAMI.weightedFrequentRegularPattern.basic.WFRIMiner.WFRIMiner.getMemoryRSS"]], "getmemoryuss() (pami.weightedfrequentregularpattern.basic.wfriminer.wfriminer method)": [[113, "PAMI.weightedFrequentRegularPattern.basic.WFRIMiner.WFRIMiner.getMemoryUSS"]], "getpatterns() (pami.weightedfrequentregularpattern.basic.wfriminer.wfriminer method)": [[113, "PAMI.weightedFrequentRegularPattern.basic.WFRIMiner.WFRIMiner.getPatterns"]], "getpatternsasdataframe() (pami.weightedfrequentregularpattern.basic.wfriminer.wfriminer method)": [[113, "PAMI.weightedFrequentRegularPattern.basic.WFRIMiner.WFRIMiner.getPatternsAsDataFrame"]], "getruntime() (pami.weightedfrequentregularpattern.basic.wfriminer.wfriminer method)": [[113, "PAMI.weightedFrequentRegularPattern.basic.WFRIMiner.WFRIMiner.getRuntime"]], "mine() (pami.weightedfrequentregularpattern.basic.wfriminer.wfriminer method)": [[113, "PAMI.weightedFrequentRegularPattern.basic.WFRIMiner.WFRIMiner.mine"]], "printresults() 
(pami.weightedfrequentregularpattern.basic.wfriminer.wfriminer method)": [[113, "PAMI.weightedFrequentRegularPattern.basic.WFRIMiner.WFRIMiner.printResults"]], "save() (pami.weightedfrequentregularpattern.basic.wfriminer.wfriminer method)": [[113, "PAMI.weightedFrequentRegularPattern.basic.WFRIMiner.WFRIMiner.save"]], "startmine() (pami.weightedfrequentregularpattern.basic.wfriminer.wfriminer method)": [[113, "PAMI.weightedFrequentRegularPattern.basic.WFRIMiner.WFRIMiner.startMine"]], "pami.weighteduncertainfrequentpattern": [[114, "module-PAMI.weightedUncertainFrequentPattern"]], "pami.weighteduncertainfrequentpattern.basic": [[115, "module-PAMI.weightedUncertainFrequentPattern.basic"]], "pami.weighteduncertainfrequentpattern.basic.wufim": [[115, "module-PAMI.weightedUncertainFrequentPattern.basic.WUFIM"]], "pami.weighteduncertainfrequentpattern.basic.abstract": [[115, "module-PAMI.weightedUncertainFrequentPattern.basic.abstract"]], "wufim (class in pami.weighteduncertainfrequentpattern.basic.wufim)": [[115, "PAMI.weightedUncertainFrequentPattern.basic.WUFIM.WUFIM"]], "getmemoryrss() (pami.weighteduncertainfrequentpattern.basic.wufim.wufim method)": [[115, "PAMI.weightedUncertainFrequentPattern.basic.WUFIM.WUFIM.getMemoryRSS"]], "getmemoryuss() (pami.weighteduncertainfrequentpattern.basic.wufim.wufim method)": [[115, "PAMI.weightedUncertainFrequentPattern.basic.WUFIM.WUFIM.getMemoryUSS"]], "getpatterns() (pami.weighteduncertainfrequentpattern.basic.wufim.wufim method)": [[115, "PAMI.weightedUncertainFrequentPattern.basic.WUFIM.WUFIM.getPatterns"]], "getpatternsasdataframe() (pami.weighteduncertainfrequentpattern.basic.wufim.wufim method)": [[115, "PAMI.weightedUncertainFrequentPattern.basic.WUFIM.WUFIM.getPatternsAsDataFrame"]], "getruntime() (pami.weighteduncertainfrequentpattern.basic.wufim.wufim method)": [[115, "PAMI.weightedUncertainFrequentPattern.basic.WUFIM.WUFIM.getRuntime"]], "mine() (pami.weighteduncertainfrequentpattern.basic.wufim.wufim method)": [[115, "PAMI.weightedUncertainFrequentPattern.basic.WUFIM.WUFIM.mine"]], "printresults() (pami.weighteduncertainfrequentpattern.basic.wufim.wufim method)": [[115, "PAMI.weightedUncertainFrequentPattern.basic.WUFIM.WUFIM.printResults"]], "save() (pami.weighteduncertainfrequentpattern.basic.wufim.wufim method)": [[115, "PAMI.weightedUncertainFrequentPattern.basic.WUFIM.WUFIM.save"]], "startmine() (pami.weighteduncertainfrequentpattern.basic.wufim.wufim method)": [[115, "PAMI.weightedUncertainFrequentPattern.basic.WUFIM.WUFIM.startMine"]]}}) \ No newline at end of file