Skip to content

Commit

Permalink
Merge pull request #324 from p2t2/DEV2.5.0
Browse files Browse the repository at this point in the history
Dev2.5.0
  • Loading branch information
Michael Reposa committed Nov 11, 2014
2 parents c483d64 + 5a8778c commit 866849f
Show file tree
Hide file tree
Showing 51 changed files with 1,701 additions and 497 deletions.
2 changes: 1 addition & 1 deletion Figaro/META-INF/MANIFEST.MF
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@ Manifest-Version: 1.0
Bundle-ManifestVersion: 2
Bundle-Name: Figaro
Bundle-SymbolicName: com.cra.figaro
Bundle-Version: 2.4.0
Bundle-Version: 2.5.0
Export-Package: com.cra.figaro.algorithm,
com.cra.figaro.algorithm.decision,
com.cra.figaro.algorithm.decision.index,
Expand Down
2 changes: 1 addition & 1 deletion Figaro/figaro_build.properties
Original file line number Diff line number Diff line change
@@ -1 +1 @@
version=2.4.0.0
version=2.5.0.0
Original file line number Diff line number Diff line change
Expand Up @@ -561,7 +561,7 @@ object Factory {
}

// When we're using a parameter to compute expected sufficient statistics, we just use its expected value
private def makeFactors(param: Parameter[_]): List[Factor[Double]] = {
private def makeParameterFactors(param: Parameter[_]): List[Factor[Double]] = {
// The parameter should have one possible value, which is its expected value
assert(Variable(param).range.size == 1)
val factor = new BasicFactor[Double](List(Variable(param)))
Expand Down Expand Up @@ -598,7 +598,8 @@ object Factory {
elem match {
case f: ParameterizedFlip => makeFactors(f)
case s: ParameterizedSelect[_] => makeFactors(s)
case p: Parameter[_] => makeFactors(p)
case p: DoubleParameter => makeParameterFactors(p)
case p: ArrayParameter => makeParameterFactors(p)
case c: Constant[_] => makeFactors(c)
case f: AtomicFlip => makeFactors(f)
case f: CompoundFlip => makeFactors(f)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ import com.cra.figaro.library.atomic.discrete.OneShifter
* @param totalSamples Maximum number of samples on the output of chains
* @param de An instance to compute the density estimate of point during resampling
*/
class ParticleGenerator(de: DensityEstimator) {
class ParticleGenerator(de: DensityEstimator, val numArgSamples: Int, val numTotalSamples: Int) {

// Caches the samples for an element
private val sampleMap = Map[Element[_], List[(Double, _)]]()
Expand All @@ -37,7 +37,7 @@ class ParticleGenerator(de: DensityEstimator) {
/**
* Retrieves the samples for an element using the default number of samples.
*/
def apply[T](elem: Element[T]): List[(Double, T)] = apply(elem, ParticleGenerator.defaultArgSamples)
def apply[T](elem: Element[T]): List[(Double, T)] = apply(elem, numArgSamples)

/**
* Retrieves the samples for an element using the indicated number of samples
Expand Down Expand Up @@ -75,15 +75,19 @@ class ParticleGenerator(de: DensityEstimator) {
beliefs.map(b => {
val oldValue = b._2.asInstanceOf[Int]
val newValue = nextInt(oldValue)
val nextValue = accept(oldValue, newValue, beliefs.asInstanceOf[List[(Double, Int)]])
val nextValue = if (o.density(newValue) > 0.0) {
accept(oldValue, newValue, beliefs.asInstanceOf[List[(Double, Int)]])
} else oldValue
(sampleDensity, nextValue)
})
}
case a: Atomic[_] => {
case a: Atomic[Double] => { // The double is unchecked, bad stuff if the atomic is not double
beliefs.map(b => {
val oldValue = b._2.asInstanceOf[Double]
val newValue = nextDouble(oldValue)
val nextValue = accept(oldValue, newValue, beliefs.asInstanceOf[List[(Double, Double)]])
val nextValue = if (a.density(newValue) > 0.0) {
accept(oldValue, newValue, beliefs.asInstanceOf[List[(Double, Double)]])
} else oldValue
(sampleDensity, nextValue)
})
}
Expand All @@ -109,7 +113,7 @@ class ParticleGenerator(de: DensityEstimator) {
}

object ParticleGenerator {
var defaultArgSamples = 10
var defaultArgSamples = 20
var defaultTotalSamples = 50

private val samplerMap: Map[Universe, ParticleGenerator] = Map()
Expand All @@ -118,17 +122,18 @@ object ParticleGenerator {

def clear() = samplerMap.clear

def apply(univ: Universe, de: DensityEstimator): ParticleGenerator =
def apply(univ: Universe, de: DensityEstimator, numArgSamples: Int, numTotalSamples: Int): ParticleGenerator =
samplerMap.get(univ) match {
case Some(e) => e
case None => {
samplerMap += (univ -> new ParticleGenerator(de))
samplerMap += (univ -> new ParticleGenerator(de, numArgSamples, numTotalSamples))
univ.registerUniverse(samplerMap)
samplerMap(univ)
}
}

def apply(univ: Universe): ParticleGenerator = apply(univ, new ConstantDensityEstimator)
def apply(univ: Universe): ParticleGenerator = apply(univ, new ConstantDensityEstimator,
defaultArgSamples, defaultTotalSamples)

def exists(univ: Universe): Boolean = samplerMap.contains(univ)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ trait Node
/**
* Class for FactorNodes in a FactorGraph
*/
final class FactorNode(val variables: List[Variable[_]]) extends Node {
/**
 * Node in a factor graph representing a single factor over a set of variables.
 * Declared as a case class so that structural equality/hashCode on the variable
 * set identifies the same factor node across lookups.
 */
final case class FactorNode(variables: Set[Variable[_]]) extends Node {
  // Render as F(id1,id2,...) using the variables' ids for compact debugging output.
  override def toString() = "F(" + variables.map(_.id).mkString(",") + ")"
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,7 @@ class BasicFactorGraph(factors: List[Factor[Double]], semiring: Semiring[Double]
/** Copies an immutable node-to-factor map into a fresh mutable map with the same entries. */
def toMutableMap(m: Map[Node, Factor[Double]]): scala.collection.mutable.Map[Node, Factor[Double]] =
  scala.collection.mutable.Map(m.toSeq: _*)

private[figaro] val factorsByNode = combineFactors.map(factor => (new FactorNode(factor.variables) -> (factor))).toMap
private[figaro] val factorsByNode = combineFactors.map(factor => (new FactorNode(factor.variables.toSet) -> (factor))).toMap

private[figaro] val adjacencyList = (adjacencyListFactors() ++ adjacencyListVariables()).map(m => m._1 -> toMutableMap(m._2))

Expand All @@ -81,5 +81,5 @@ class BasicFactorGraph(factors: List[Factor[Double]], semiring: Semiring[Double]
this
}

def contains(v: Variable[_]): Boolean = adjacencyList.contains(VariableNode(v))
def contains(v: Node): Boolean = adjacencyList.contains(v)
}
Original file line number Diff line number Diff line change
Expand Up @@ -121,7 +121,7 @@ trait BeliefPropagation[T] extends FactoredAlgorithm[T] {

val f = if (messageList.isEmpty) {
source match {
case fn: FactorNode => factorGraph.uniformFactor(fn.variables)
case fn: FactorNode => factorGraph.uniformFactor(fn.variables.toList)
case vn: VariableNode => factorGraph.uniformFactor(List(vn.variable))
}
} else {
Expand Down Expand Up @@ -174,6 +174,14 @@ trait BeliefPropagation[T] extends FactoredAlgorithm[T] {
println()
}
synchronousUpdate()
if (debug) {
beliefMap.foreach(a => println(a._1 + " => " + a._2)); println
println("Factor Messages:")
factorGraph.getNodes.foreach{n =>
println(n + ": ")
println(factorGraph.getMessagesForNode(n))
}
}
}

override def initialize() = {
Expand All @@ -193,7 +201,7 @@ trait ProbabilisticBeliefPropagation extends BeliefPropagation[Double] {
def normalize(factor: Factor[Double]): Factor[Double] = {
val z = semiring.sumMany(factor.contents.values)
// Since we're in log space, d - z = log(exp(d)/exp(z))
factor.mapTo((d: Double) => if (z != semiring.zero) d - z else semiring.zero, factor.variables)
factor.mapTo((d: Double) => if (z != semiring.zero) d - z else semiring.zero, factor.variables)
}

/*
Expand All @@ -219,13 +227,13 @@ trait ProbabilisticBeliefPropagation extends BeliefPropagation[Double] {
}

private[figaro] def makeLogarithmic(factor: Factor[Double]): Factor[Double] = {
factor.mapTo((d: Double) => Math.log(d), factor.variables)
factor.mapTo((d: Double) => Math.log(d), factor.variables)
}

/** Converts a log-space factor back to probability space by exponentiating every entry. */
private[figaro] def unmakeLogarithmic(factor: Factor[Double]): Factor[Double] =
  factor.mapTo((d: Double) => math.exp(d), factor.variables)

/**
* Get the belief for an element
*/
Expand Down Expand Up @@ -316,10 +324,10 @@ abstract class ProbQueryBeliefPropagation(override val universe: Universe, targe

val semiring = LogSumProductSemiring

var neededElements: List[Element[_]] = _
var needsBounds: Boolean = _
override def initialize() = {
var neededElements: List[Element[_]] = _
var needsBounds: Boolean = _

def generateGraph() = {
val needs = getNeededElements(starterElements, depth)
neededElements = needs._1
needsBounds = needs._2
Expand All @@ -331,7 +339,11 @@ abstract class ProbQueryBeliefPropagation(override val universe: Universe, targe
getFactors(neededElements, targetElements)
}

factorGraph = new BasicFactorGraph(factors, semiring): FactorGraph[Double]
factorGraph = new BasicFactorGraph(factors, semiring): FactorGraph[Double]
}

override def initialize() = {
if (factorGraph == null) generateGraph()
super.initialize
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ trait FactorGraph[T] {
/**
* Returns true if the graph contains a node for a (single) variable
*/
def contains(v: Variable[_]): Boolean
def contains(v: Node): Boolean

/**
* Returns all nodes in the factor graph
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -40,11 +40,12 @@ trait OneTimeInnerBPHandler extends InnerBPHandler {
* Number of iterations to run BP per step.
*/
val innerIterations: Int

protected def createBP(targets: List[Element[_]], depth: Int = Int.MaxValue, upperBounds: Boolean = false): Unit = {
bp = new ProbQueryBeliefPropagation(currentUniverse, targets: _*)(List(),
(u: Universe, e: List[NamedEvidence[_]]) => () => ProbEvidenceSampler.computeProbEvidence(10000, e)(u), depth, upperBounds)
with OneTimeProbabilisticBeliefPropagation with OneTimeProbQuery { override val iterations = innerIterations;}
bp = new ProbQueryBeliefPropagation(currentUniverse, targets: _*)(List(),
(u: Universe, e: List[NamedEvidence[_]]) => () => ProbEvidenceSampler.computeProbEvidence(10000, e)(u), depth, upperBounds) with OneTimeProbabilisticBeliefPropagation with OneTimeProbQuery {
override val iterations = innerIterations
}
}

protected def runBP() {
Expand All @@ -62,9 +63,8 @@ trait AnytimeInnerBPHandler extends InnerBPHandler {
val myStepTimeMillis: Long

protected def createBP(targets: List[Element[_]], depth: Int = Int.MaxValue, upperBounds: Boolean = false): Unit = {
bp = new ProbQueryBeliefPropagation(currentUniverse, targets: _*)(List(),
(u: Universe, e: List[NamedEvidence[_]]) => () => ProbEvidenceSampler.computeProbEvidence(10000, e)(u), depth, upperBounds)
with AnytimeProbabilisticBeliefPropagation with AnytimeProbQuery
bp = new ProbQueryBeliefPropagation(currentUniverse, targets: _*)(List(),
(u: Universe, e: List[NamedEvidence[_]]) => () => ProbEvidenceSampler.computeProbEvidence(10000, e)(u), depth, upperBounds) with AnytimeProbabilisticBeliefPropagation with AnytimeProbQuery
}

protected def runBP() {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -235,7 +235,13 @@ class LazyValues(universe: Universe) {
* or if a previous call has resulted in a result with no Star, the previous result is reused.
*/
def apply[T](element: Element[T], depth: Int): ValueSet[T] = {
apply(element, depth, ParticleGenerator.defaultArgSamples, ParticleGenerator.defaultTotalSamples )
val (numArgSamples, numTotalSamples) = if (ParticleGenerator.exists(universe)) {
val pg = ParticleGenerator(universe)
(pg.numArgSamples, pg.numTotalSamples )
} else {
(ParticleGenerator.defaultArgSamples, ParticleGenerator.defaultTotalSamples)
}
apply(element, depth, numArgSamples, numTotalSamples)
}

def apply[T](element: Element[T], depth: Int, numArgSamples: Int, numTotalSamples: Int): ValueSet[T] = {
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
package com.cra.figaro.algorithm.learning

import com.cra.figaro.language.Parameter

/**
 * Base class for criteria that decide when an ExpectationMaximization run should stop.
 * Concrete subclasses are invoked once per EM iteration with the current sufficient
 * statistics and return true when the algorithm should terminate.
 *
 * @param alg the EM algorithm instance this criterion monitors
 */
abstract class EMTerminationCriteria(val alg: ExpectationMaximization) {
  // Map from each learned parameter to its current vector of sufficient statistics.
  type SufficientStatistics = Map[Parameter[_], Seq[Double]]
  // Returns true when EM should terminate, given this iteration's sufficient statistics.
  def apply(s: SufficientStatistics): Boolean
}

/**
 * Termination criterion intended to stop EM when the likelihood improvement falls
 * below `tolerance`.
 *
 * NOTE(review): currently a stub — apply always returns false (EM never terminates
 * via this criterion) and `previousLikelihood` is tracked but never read. Confirm
 * whether the likelihood-based check is still to be implemented.
 */
class LikelihoodTermination(val tolerance: Double, alg: ExpectationMaximization) extends EMTerminationCriteria(alg) {
  // Likelihood observed at the previous iteration; not yet used by apply.
  var previousLikelihood = Double.NegativeInfinity
  override def apply(s: SufficientStatistics): Boolean = {
    false
  }
}

/**
 * Termination criterion that stops EM after a fixed number of iterations.
 *
 * @param iterations maximum number of EM iterations to run before terminating
 */
class MaxIterations(val iterations: Int, alg: ExpectationMaximization) extends EMTerminationCriteria(alg) {
  // Number of EM iterations completed so far; incremented on every call to apply.
  var currentIterations = 0
  override def apply(s: SufficientStatistics): Boolean = {
    currentIterations += 1
    // Terminate once the iteration budget has been reached.
    currentIterations >= iterations
  }
}

/**
 * Termination criterion that stops EM when the average change in sufficient
 * statistics between consecutive iterations falls below `tolerance`.
 *
 * @param tolerance threshold on the mean absolute per-parameter change; EM
 *                  terminates when the observed change drops below this value
 */
class SufficientStatisticsMagnitudes(val tolerance: Double, alg: ExpectationMaximization) extends EMTerminationCriteria(alg) {
  // Sufficient statistics recorded at the previous iteration; empty until the first call.
  var previousSufficientStatistics = Map.empty[Parameter[_], Seq[Double]]

  /**
   * Mean absolute elementwise difference between two statistic vectors.
   * Requires x and y to have the same length.
   */
  def difference(x: Seq[Double], y: Seq[Double]): Double = {
    require(x.size == y.size)
    val absDiffs = (x zip y).map { case (a, b) => math.abs(a - b) }
    absDiffs.sum / x.size.toDouble
  }

  override def apply(s: SufficientStatistics): Boolean = {
    if (previousSufficientStatistics.isEmpty) {
      // First iteration: nothing to compare against yet, so record and continue.
      previousSufficientStatistics = s
      false
    } else {
      // Average the per-parameter mean absolute changes since the last iteration.
      val deltas = for (k <- s.keys) yield difference(s(k), previousSufficientStatistics(k))
      val totalDelta = deltas.sum / deltas.size.toDouble
      previousSufficientStatistics = s
      totalDelta < tolerance
    }
  }
}

/**
 * Termination criterion intended to stop EM based on the Bayesian Information
 * Criterion.
 *
 * NOTE(review): currently a stub — apply always returns false, so this criterion
 * never terminates EM on its own. Confirm whether the BIC computation is still
 * to be implemented.
 */
class BICTermination(val tolerance: Double, alg: ExpectationMaximization) extends EMTerminationCriteria(alg) {
  override def apply(s: SufficientStatistics): Boolean = {
    false
  }
}
Loading

0 comments on commit 866849f

Please sign in to comment.