Improved GammaPower implementation (#195)

GammaPower creates a point mass whenever Rate would be infinite.
Added GammaPower.FromMeanAndMeanLog.
Improved numerical accuracy of GammaPower.GetLogProb, GetMean, and GetMode.
Added GammaPowerEstimator
GammaProductOp supports GammaPower distributions.
GammaProductOp handles uniform message from product.
Added GammaPowerProductOp_Laplace
Fixed PowerOp for GammaPower distributions.
Added PlusGammaOp for GammaPower distributions.
MMath.GammaUpper has an unregularized option.
Added TruncatedGamma.GetMeanPower.
PowerOp supports TruncatedGamma.
Swapped the argument order of (internal) MMath.LargestDoubleProduct and LargestDoubleRatio.
This commit is contained in:
Tom Minka 2019-11-13 14:31:12 +00:00 коммит произвёл GitHub
Родитель d6e7d5b975
Коммит 57c888024c
Не найден ключ, соответствующий данной подписи
Идентификатор ключа GPG: 4AEE18F83AFDEB23
23 изменённых файлов: 2475 добавлений и 574 удалений

Просмотреть файл

@ -1602,7 +1602,7 @@ namespace Microsoft.ML.Probabilistic.Models
/// contains more than two <c>Range</c> objects.</exception>
public static IVariableArray Array<T>(IList<Range> ranges)
{
if (ranges.Count == 0) throw new ArgumentException("Range list is empty.",nameof(ranges));
if (ranges.Count == 0) throw new ArgumentException("Range list is empty.", nameof(ranges));
else if (ranges.Count == 1) return Variable.Array<T>(ranges[0]);
else if (ranges.Count == 2) return Variable.Array<T>(ranges[0], ranges[1]);
else throw new NotSupportedException("More than two ranges were specified, high rank arrays are not yet supported.");
@ -5560,7 +5560,7 @@ namespace Microsoft.ML.Probabilistic.Models
MethodInfo method = type.GetMethod("CreateVariableArrayFromItem", BindingFlags.NonPublic | BindingFlags.Static);
return (IVariableArray)Util.Invoke(method, null, array, headRanges);
}
Variable<T> itemPrototype = (Variable<T>)item.Clone();
VariableArray<T> variableArray = new VariableArray<T>(itemPrototype, ranges[0]);
@ -6386,12 +6386,17 @@ namespace Microsoft.ML.Probabilistic.Models
/// </summary>
protected static Variable<bool> GreaterThan(Variable<T> a, Variable<T> b)
{
Variable<bool> f = OperatorFactor<bool>(Operator.GreaterThan, a, b);
if ((object)f == null) f = OperatorFactor<bool>(Operator.LessThan, b, a);
if ((object)f == null) f = NotOrNull(OperatorFactor<bool>(Operator.LessThanOrEqual, a, b));
if ((object)f == null) f = NotOrNull(OperatorFactor<bool>(Operator.GreaterThanOrEqual, b, a));
if ((object)f != null) return f;
else if (typeof(double).IsAssignableFrom(typeof(T)))
return OperatorFactor<bool>(Operator.GreaterThan, a, b)
?? OperatorFactor<bool>(Operator.LessThan, b, a)
?? NotOrNull(OperatorFactor<bool>(Operator.LessThanOrEqual, a, b))
?? NotOrNull(OperatorFactor<bool>(Operator.GreaterThanOrEqual, b, a))
?? GreaterThanFromMinus(a, b)
?? throw new InvalidOperationException("Neither of the operators (<,<=,>,>=) has a registered factor for argument type " + typeof(T) + ".");
}
private static Variable<bool> GreaterThanFromMinus(Variable<T> a, Variable<T> b)
{
if (typeof(double).IsAssignableFrom(typeof(T)))
{
Variable<T> diff;
if (b.IsObserved && b.IsReadOnly && b.IsBase && b.ObservedValue.Equals(0.0))
@ -6400,22 +6405,23 @@ namespace Microsoft.ML.Probabilistic.Models
}
else if (a.IsObserved && a.IsReadOnly && a.IsBase && a.ObservedValue.Equals(0.0))
{
return !GreaterThanOrEqual(b, a);
diff = OperatorFactor<T>(Operator.Negative, b);
}
else
{
diff = OperatorFactor<T>(Operator.Minus, a, b);
diff = null;
}
if ((object)diff == null)
{
throw new InvalidOperationException("None of the operators (<,>,-) has a registered factor for argument type " + typeof(T) + ".");
diff = OperatorFactor<T>(Operator.Minus, a, b);
}
return IsPositive((Variable<double>)(Variable)diff);
}
else
{
throw new InvalidOperationException("Neither of the operators (<,>) has a registered factor for argument type " + typeof(T) + ".");
if ((object)diff != null)
{
return IsPositive((Variable<double>)(Variable)diff);
}
// Fall through
}
return null;
}
private static Variable<bool> NotOrNull(Variable<bool> Variable)
@ -6428,36 +6434,12 @@ namespace Microsoft.ML.Probabilistic.Models
/// </summary>
protected static Variable<bool> GreaterThanOrEqual(Variable<T> a, Variable<T> b)
{
Variable<bool> f = OperatorFactor<bool>(Operator.GreaterThanOrEqual, a, b);
if ((object)f == null) f = OperatorFactor<bool>(Operator.LessThanOrEqual, b, a);
if ((object)f == null) f = NotOrNull(OperatorFactor<bool>(Operator.LessThan, a, b));
if ((object)f == null) f = NotOrNull(OperatorFactor<bool>(Operator.GreaterThan, b, a));
if ((object)f != null) return f;
else if (typeof(double).IsAssignableFrom(typeof(T)))
{
Variable<T> diff;
if (b.IsObserved && b.IsReadOnly && b.IsBase && b.ObservedValue.Equals(0.0))
{
diff = a;
}
else if (a.IsObserved && a.IsReadOnly && a.IsBase && a.ObservedValue.Equals(0.0))
{
return !GreaterThan(b, a);
}
else
{
diff = OperatorFactor<T>(Operator.Minus, a, b);
}
if ((object)diff == null)
{
throw new InvalidOperationException("None of the operators (<,>,-) has a registered factor for argument type " + typeof(T) + ".");
}
return IsPositive((Variable<double>)(Variable)diff); // should be IsPositiveOrZero
}
else
{
throw new InvalidOperationException("Neither of the operators (<,>) has a registered factor for argument type " + typeof(T) + ".");
}
return OperatorFactor<bool>(Operator.GreaterThanOrEqual, a, b)
?? OperatorFactor<bool>(Operator.LessThanOrEqual, b, a)
?? NotOrNull(OperatorFactor<bool>(Operator.LessThan, a, b))
?? NotOrNull(OperatorFactor<bool>(Operator.GreaterThan, b, a))
?? NotOrNull(GreaterThanFromMinus(b, a))
?? throw new InvalidOperationException("Neither of the operators (<,<=,>,>=) has a registered factor for argument type " + typeof(T) + ".");
}
/// <summary>

Просмотреть файл

@ -757,17 +757,52 @@ namespace Microsoft.ML.Probabilistic.Math
else // x >= 6
{
double sum = LnSqrt2PI;
while (x < 10)
while (x < GammaLnLargeX)
{
sum -= Math.Log(x);
x++;
}
// x >= 10
// x >= GammaLnLargeX
// Use asymptotic series
return GammaLnSeries(x) + (x - 0.5) * Math.Log(x) - x + sum;
}
}
/// <summary>
/// Computes the logarithm of the Pochhammer function, divided by n: (GammaLn(x + n) - GammaLn(x))/n
/// </summary>
/// <param name="x">A real number &gt; 0</param>
/// <param name="n">If zero, result is 0.</param>
/// <returns></returns>
public static double RisingFactorialLnOverN(double x, double n)
{
// To ensure that the result increases with n, we group terms in n.
if (x <= 0) throw new ArgumentOutOfRangeException(nameof(x), x, "x <= 0");
else if (n == 0) return 0;
else if (x < 1e-16 && x + n < 1e-16)
{
// GammaLn(x) = -log(x)
return -Log1Plus(n / x) / n;
}
else if (Math.Abs(n / x) < 1e-6)
{
// x >= 1e-6 ensures that Tetragamma doesn't overflow
// For small x, Digamma(x) = -1/x, Trigamma(x) = 1/x^2, Tetragamma(x) = -1/x^3
// To ignore the next term, we need 1/x to dominate n^3/x^4, i.e. 1 >> n^3/x^3
return MMath.Digamma(x) + 0.5 * n * (MMath.Trigamma(x) + n / 3 * MMath.Tetragamma(x));
}
else if (x > GammaLnLargeX && x + n > GammaLnLargeX)
{
double nOverX = n / x;
if (Math.Abs(nOverX) < 1e-8)
// log(1 + x) = x - 0.5*x^2 when x^2 << 1
return Log1Plus(nOverX) - 0.5 * (1 - 0.5 / x) * nOverX + (GammaLnSeries(x + n) - GammaLnSeries(x)) / n - 0.5 / x + Math.Log(x);
else
return (1 + (x - 0.5) / n) * Log1Plus(nOverX) + (GammaLnSeries(x + n) - GammaLnSeries(x)) / n - 1 + Math.Log(x);
}
else return (MMath.GammaLn(x + n) - MMath.GammaLn(x)) / n;
}
/* Python code to generate this table (must not be indented):
for k in range(2,26):
print(" %0.20g," % ((-1)**k*(zeta(k)-1)/k))
@ -1102,10 +1137,11 @@ for k in range(2,26):
/// <summary>
/// Compute the regularized upper incomplete Gamma function: int_x^inf t^(a-1) exp(-t) dt / Gamma(a)
/// </summary>
/// <param name="a">The shape parameter, &gt; 0</param>
/// <param name="a">The shape parameter. Must be &gt; 0 if regularized is true or x is 0.</param>
/// <param name="x">The lower bound of the integral, &gt;= 0</param>
/// <param name="regularized">If true, result is divided by Gamma(a)</param>
/// <returns></returns>
public static double GammaUpper(double a, double x)
public static double GammaUpper(double a, double x, bool regularized = true)
{
// special cases:
// GammaUpper(1,x) = exp(-x)
@ -1113,6 +1149,11 @@ for k in range(2,26):
// GammaUpper(a,x) = GammaUpper(a-1,x) + x^(a-1) exp(-x) / Gamma(a)
if (x < 0)
throw new ArgumentException($"x ({x}) < 0");
if (!regularized)
{
if (a < 1) return GammaUpperConFrac(a, x, regularized);
else return Gamma(a) * GammaUpper(a, x, true);
}
if (a <= 0)
throw new ArgumentException($"a ({a}) <= 0");
if (x == 0) return 1; // avoid 0/0
@ -1394,14 +1435,16 @@ f = 1/gamma(x+1)-1
/// <summary>
/// Compute the regularized upper incomplete Gamma function by a continued fraction
/// </summary>
/// <param name="a">A real number &gt; 0</param>
/// <param name="a">A real number. Must be &gt; 0 if regularized is true.</param>
/// <param name="x">A real number &gt;= 1.1</param>
/// <param name="regularized">If true, result is divded by Gamma(a)</param>
/// <returns></returns>
private static double GammaUpperConFrac(double a, double x)
private static double GammaUpperConFrac(double a, double x, bool regularized = true)
{
double scale = GammaUpperScale(a, x);
double scale = regularized ? GammaUpperScale(a, x) : Math.Exp(a * Math.Log(x) - x);
if (scale == 0)
return scale;
if (x > double.MaxValue) return 0.0;
// the confrac coefficients are:
// a_i = -i*(i-a)
// b_i = x+1-a+2*i
@ -1439,7 +1482,7 @@ f = 1/gamma(x+1)-1
private static double GammaLnSeries(double x)
{
// GammaLnSeries(10) = 0.008330563433362871
if (x < 10)
if (x < GammaLnLargeX)
{
return MMath.GammaLn(x) - (x - 0.5) * Math.Log(x) + x - LnSqrt2PI;
}
@ -2502,7 +2545,7 @@ rr = mpf('-0.99999824265582826');
double rPlus1 = omr2 / (1 - r);
return xPlusy - rPlus1 * y;
}
else if (r > 0.75 && Math.Abs(x-y) < 0.5 * Math.Abs(y))
else if (r > 0.75 && Math.Abs(x - y) < 0.5 * Math.Abs(y))
{
double omr = omr2 / (1 + r);
return x - y + omr * y;
@ -3423,6 +3466,8 @@ rr = mpf('-0.99999824265582826');
return 0.0;
else if (x > y)
return Math.Exp(x + MMath.Log1MinusExp(y - x));
else if (double.IsNaN(x) || double.IsNaN(y))
return double.NaN;
else
return -DifferenceOfExp(y, x);
}
@ -4315,15 +4360,95 @@ else if (m < 20.0 - 60.0/11.0 * s) {
return BitConverter.Int64BitsToDouble(bits - 1);
}
/// <summary>
/// Returns the largest value such that value * denominator &lt;= product.
/// </summary>
/// <param name="numerator"></param>
/// <param name="denominator"></param>
/// <returns></returns>
internal static double LargestDoubleRatio(double numerator, double denominator)
{
if (denominator < 0) return LargestDoubleRatio(-numerator, -denominator);
if (denominator == 0)
{
if (double.IsNaN(numerator)) return double.PositiveInfinity;
else if (numerator >= 0)
return double.MaxValue;
else
return double.NaN;
}
// denominator > 0
if (double.IsPositiveInfinity(numerator)) return numerator;
if (double.IsPositiveInfinity(denominator))
{
if (double.IsNaN(numerator)) return 0;
else return PreviousDouble(0);
}
double lowerBound, upperBound;
if (denominator >= 1)
{
if (double.IsNegativeInfinity(numerator))
{
upperBound = NextDouble(numerator) / denominator;
if (AreEqual(upperBound * denominator, numerator)) return upperBound;
else return PreviousDouble(upperBound);
}
// ratio cannot be infinite since numerator is not infinite.
double ratio = numerator / denominator;
lowerBound = PreviousDouble(ratio);
upperBound = NextDouble(ratio);
}
else // 0 < denominator < 1
{
// avoid infinite bounds
if (numerator == double.Epsilon) lowerBound = numerator / denominator / 2; // cannot overflow
else if (numerator == 0) lowerBound = 0;
else lowerBound = (double)Math.Max(double.MinValue, Math.Min(double.MaxValue, PreviousDouble(numerator) / denominator));
if (numerator == -double.Epsilon) upperBound = numerator / denominator / 2; // cannot overflow
else upperBound = (double)Math.Min(double.MaxValue, NextDouble(numerator) / denominator);
if (double.IsNegativeInfinity(upperBound)) return upperBound; // must have ratio < -1 and denominator > 1
}
int iterCount = 0;
while (true)
{
iterCount++;
double value = (double)Average(lowerBound, upperBound);
if (value < lowerBound || value > upperBound) throw new Exception($"value={value:r}, lowerBound={lowerBound:r}, upperBound={upperBound:r}, denominator={denominator:r}, ratio={numerator:r}");
if ((double)(value * denominator) <= numerator)
{
double value2 = NextDouble(value);
if (value2 == value || (double)(value2 * denominator) > numerator)
{
// Used for performance debugging
//if (iterCount > 100)
// throw new Exception();
return value;
}
else
{
// value is too low
lowerBound = value2;
if (lowerBound > upperBound || double.IsNaN(lowerBound)) throw new Exception($"value={value:r}, lowerBound={lowerBound:r}, upperBound={upperBound:r}, denominator={denominator:r}, ratio={numerator:r}");
}
}
else
{
// value is too high
upperBound = PreviousDouble(value);
if (lowerBound > upperBound || double.IsNaN(upperBound)) throw new Exception($"value={value:r}, lowerBound={lowerBound:r}, upperBound={upperBound:r}, denominator={denominator:r}, ratio={numerator:r}");
}
}
}
/// <summary>
/// Returns the largest value such that value/denominator &lt;= ratio.
/// </summary>
/// <param name="denominator"></param>
/// <param name="ratio"></param>
/// <param name="denominator"></param>
/// <returns></returns>
internal static double LargestDoubleProduct(double denominator, double ratio)
internal static double LargestDoubleProduct(double ratio, double denominator)
{
if (denominator < 0) return LargestDoubleProduct(-denominator, -ratio);
if (denominator < 0) return LargestDoubleProduct(-ratio, -denominator);
if (denominator == 0)
{
if (double.IsNaN(ratio)) return 0;
@ -4344,7 +4469,7 @@ else if (m < 20.0 - 60.0/11.0 * s) {
double lowerBound, upperBound;
if (denominator <= 1)
{
if(double.IsNegativeInfinity(ratio))
if (double.IsNegativeInfinity(ratio))
{
upperBound = denominator * NextDouble(ratio);
if (AreEqual(upperBound / denominator, ratio)) return upperBound;
@ -4525,6 +4650,8 @@ else if (m < 20.0 - 60.0/11.0 * s) {
private const double TOLERANCE = 1.0e-7;
private const double GammaLnLargeX = 10;
/// <summary>
/// Math.Sqrt(2*Math.PI)
/// </summary>

Просмотреть файл

@ -320,7 +320,7 @@ namespace Microsoft.ML.Probabilistic.Distributions
if (x < 0)
throw new ArgumentOutOfRangeException(nameof(x), x, "x < 0");
double a;
if(double.IsPositiveInfinity(x))
if (double.IsPositiveInfinity(x))
{
if (ddLogP < 0) return Gamma.PointMass(x);
else if (ddLogP == 0) a = 0.0;
@ -334,7 +334,7 @@ namespace Microsoft.ML.Probabilistic.Distributions
else
{
a = -x * x * ddLogP;
if (a+1 > double.MaxValue) return Gamma.PointMass(x);
if (a + 1 > double.MaxValue) return Gamma.PointMass(x);
}
if (forceProper)
{
@ -489,11 +489,11 @@ namespace Microsoft.ML.Probabilistic.Distributions
{
throw new ImproperDistributionException(this);
}
else if(MMath.AreEqual(probability, 0))
else if (MMath.AreEqual(probability, 0))
{
return 0;
}
else if(MMath.AreEqual(probability, 1))
else if (MMath.AreEqual(probability, 1))
{
return double.PositiveInfinity;
}
@ -507,15 +507,15 @@ namespace Microsoft.ML.Probabilistic.Distributions
// Binary search
double lowerBound = 0;
double upperBound = double.MaxValue;
while(lowerBound < upperBound)
while (lowerBound < upperBound)
{
double average = MMath.Average(lowerBound, upperBound);
double p = GetProbLessThan(average);
if(p == probability)
if (p == probability)
{
return average;
}
else if(p < probability)
else if (p < probability)
{
lowerBound = MMath.NextDouble(average);
}
@ -598,15 +598,25 @@ namespace Microsoft.ML.Probabilistic.Distributions
public static double GetLogProb(double x, double shape, double rate)
{
if (x < 0) return double.NegativeInfinity;
if (double.IsPositiveInfinity(x))
{
if (x > double.MaxValue) // Avoid subtracting infinities below
{
if (rate > 0) return -x;
else if (rate < 0) return x;
// fall through when rate == 0
}
if (shape > 1e10 && IsProper(shape, rate))
{
// In double precision, we can assume GammaLn(x) = (x-0.5)*log(x) - x for x > 1e10
// Also log(1-1/x) = -1/x - 0.5/x^2 for x > 1e10
// We compute the density in a way that ensures the maximum is at the mode returned by GetMode.
double mode = (shape - 1) / rate; // cannot be zero
double xOverMode = x / mode;
if (xOverMode > double.MaxValue) return double.NegativeInfinity;
else return (shape - 1) * (Math.Log(xOverMode) + (1 - xOverMode)) + (0.5 + 0.5 / shape) / shape + Math.Log(rate) - 0.5 * Math.Log(shape);
}
double result = 0;
if (shape != 1) result += (shape - 1) * Math.Log(x);
if (rate != 0) result -= x * rate;
if (rate != 0 && x != 0) result -= x * rate;
if (IsProper(shape, rate))
{
result += shape * Math.Log(rate) - MMath.GammaLn(shape);

Просмотреть файл

@ -24,18 +24,10 @@ namespace Microsoft.ML.Probabilistic.Distributions
/// <summary>
/// Where to accumulate means and variances
/// </summary>
public MeanVarianceAccumulator mva;
public MeanVarianceAccumulator mva = new MeanVarianceAccumulator();
/// <summary>
/// Creates a new Gamma estimator
/// </summary>
public GammaEstimator()
{
mva = new MeanVarianceAccumulator();
}
/// <summary>
/// Retrieves the Gamma estimator
/// Retrieves the estimated Gamma
/// </summary>
/// <param name="result">Where to put the result</param>
/// <returns>The resulting distribution</returns>
@ -54,6 +46,12 @@ namespace Microsoft.ML.Probabilistic.Distributions
{
Add(distribution, 1.0);
}
/// <summary>
/// Adds a Gamma distribution item to the estimator
/// </summary>
/// <param name="distribution">The distribution instance to add</param>
/// <param name="weight">The weight of the sample</param>
public void Add(Gamma distribution, double weight)
{
double x, noiseVariance;

Просмотреть файл

@ -5,13 +5,14 @@
namespace Microsoft.ML.Probabilistic.Distributions
{
using System;
using System.Diagnostics;
using System.Runtime.Serialization;
using Factors.Attributes;
using Math;
using Microsoft.ML.Probabilistic.Serialization;
using Utilities;
/// <summary>
/// The distribution of a Gamma variable raised to a power. The Weibull distribution is a special case.
/// </summary>
@ -77,14 +78,8 @@ namespace Microsoft.ML.Probabilistic.Distributions
/// <returns>Variance</returns>
public double GetVariance()
{
if (IsPointMass) return 0;
else if (Rate == 0.0) return Double.PositiveInfinity;
else if (!IsProper()) throw new ImproperDistributionException(this);
else
{
double mean = GetMean();
return GetMeanPower(2) - mean*mean;
}
GetMeanAndVariance(out double mean, out double variance);
return variance;
}
/// <summary>
@ -94,24 +89,32 @@ namespace Microsoft.ML.Probabilistic.Distributions
/// <param name="variance">Where to put the variance</param>
public void GetMeanAndVariance(out double mean, out double variance)
{
if (Power == 0)
{
mean = 1;
variance = 0;
}
if (IsPointMass)
{
mean = Point;
variance = 0;
}
else if (Rate == 0.0)
{
mean = Double.PositiveInfinity;
variance = Double.PositiveInfinity;
}
else if (!IsProper())
else if (!IsProper() && Rate != 0)
{
throw new ImproperDistributionException(this);
}
else
{
mean = GetMean();
variance = GetMeanPower(2) - mean*mean;
double logMean = GetLogMeanPower(1);
mean = Math.Exp(logMean);
if (Rate == 0 && Shape + 2 * Power == 0 && 2 * Power < -1) variance = 0;
else if (Shape + 2 * Power <= 0) variance = double.PositiveInfinity;
else
{
double logMean2 = GetLogMeanPower(2);
if (logMean2 > double.MaxValue) variance = double.PositiveInfinity;
else variance = MMath.DifferenceOfExp(logMean2, 2 * logMean);
}
}
}
@ -122,6 +125,8 @@ namespace Microsoft.ML.Probabilistic.Distributions
/// <param name="variance">Variance</param>
public void SetMeanAndVariance(double mean, double variance)
{
if (double.IsNaN(mean)) throw new ArgumentOutOfRangeException(nameof(mean), mean, "mean is NaN");
if (double.IsNaN(variance)) throw new ArgumentOutOfRangeException(nameof(variance), variance, "variance is NaN");
if (variance == 0)
{
Point = mean;
@ -138,29 +143,34 @@ namespace Microsoft.ML.Probabilistic.Distributions
{
// mean = a/b
// variance = a/b^2
Rate = mean/variance;
Shape = mean*Rate;
Rate = mean / variance;
Shape = mean * Rate;
}
else if (Power == -1)
{
// mean = b/(a-1)
// variance = b^2/(a-1)^2/(a-2)
Shape = mean*mean/variance + 2;
Rate = mean*(Shape - 1);
double mean2 = mean * mean;
double r = double.IsPositiveInfinity(mean2) ? 1.0 : mean2 / variance;
Shape = r + 2;
Rate = mean * (Shape - 1);
if (double.IsNaN(Shape)) throw new Exception();
}
else if (Power == 2)
{
// mean = a*(a+1)/b^2
// variance = a*(a+1)*(4*a+6)/b^4
double r = mean*mean/variance;
double b = 2*r - 0.5;
Shape = Math.Sqrt(b*b + 6*r) + b;
Rate = Math.Sqrt(Shape*(Shape + 1)/mean);
double mean2 = mean * mean;
double r = double.IsPositiveInfinity(mean2) ? 1.0 : mean2 / variance;
double b = 2 * r - 0.5;
Shape = Math.Sqrt(b * b + 6 * r) + b;
Rate = Math.Sqrt(Shape * (Shape + 1) / mean);
}
else
{
throw new NotImplementedException("Not implemented for power " + Power);
}
CheckForPointMass();
}
/// <summary>
@ -186,9 +196,19 @@ namespace Microsoft.ML.Probabilistic.Distributions
/// <param name="power"></param>
public void SetShapeAndRate(double shape, double rate, double power)
{
this.Power = power;
this.Shape = shape;
this.Rate = rate;
this.Power = power;
CheckForPointMass();
}
private void CheckForPointMass()
{
if (!IsPointMass && Rate > double.MaxValue)
{
Rate = Math.Pow(0, Power);
SetToPointMass();
}
}
/// <summary>
@ -202,9 +222,7 @@ namespace Microsoft.ML.Probabilistic.Distributions
public static GammaPower FromShapeAndRate(double shape, double rate, double power)
{
GammaPower result = new GammaPower();
result.Shape = shape;
result.Rate = rate;
result.Power = power;
result.SetShapeAndRate(shape, rate, power);
return result;
}
@ -216,13 +234,14 @@ namespace Microsoft.ML.Probabilistic.Distributions
/// <param name="power"></param>
public void SetShapeAndScale(double shape, double scale, double power)
{
this.Shape = shape;
this.Rate = 1.0/scale;
this.Power = power;
this.Shape = shape;
this.Rate = 1.0 / scale;
CheckForPointMass();
}
/// <summary>
/// Constructs a Gamma distribution with the given shape and scale parameters.
/// Constructs a GammaPower distribution with the given shape and scale parameters.
/// </summary>
/// <param name="shape">shape</param>
/// <param name="scale">scale</param>
@ -235,6 +254,42 @@ namespace Microsoft.ML.Probabilistic.Distributions
return result;
}
/// <summary>
/// Constructs a GammaPower distribution with the given mean and mean logarithm.
/// </summary>
/// <param name="mean">Desired expected value.</param>
/// <param name="meanLog">Desired expected logarithm.</param>
/// <param name="power">Desired power.</param>
/// <returns>A new GammaPower distribution.</returns>
/// <remarks>This function is equivalent to maximum-likelihood estimation of a Gamma distribution
/// from data given by sufficient statistics.
/// This function is significantly slower than the other constructors since it
/// involves nonlinear optimization. The algorithm is a generalized Newton iteration,
/// described in "Estimating a Gamma distribution" by T. Minka, 2002.
/// </remarks>
public static GammaPower FromMeanAndMeanLog(double mean, double meanLog, double power)
{
// Constraints:
// mean = Gamma(Shape + power)/Gamma(Shape)/Rate^power
// meanLog = power*(digamma(Shape) - log(Rate))
// digamma(Shape) =approx log(Shape - 0.5)
double logMeanOverPower = Math.Log(mean) / power;
double meanLogOverPower = meanLog / power;
double shape = 1;
double logRate = 0;
for (int iter = 0; iter < 1000; iter++)
{
double oldLogRate = logRate;
double oldShape = shape;
logRate = MMath.RisingFactorialLnOverN(shape, power) - logMeanOverPower;
shape = Math.Exp(meanLogOverPower + logRate) + 0.5;
//Console.WriteLine($"shape = {shape:r}, logRate = {logRate:r}");
if (MMath.AreEqual(oldLogRate, logRate) && MMath.AreEqual(oldShape, shape)) break;
if (double.IsNaN(shape)) throw new Exception("Failed to converge");
}
return FromShapeAndRate(shape, Math.Exp(logRate), power);
}
/// <summary>
/// Constructs a GammaPower distribution from a Gamma distribution.
/// </summary>
@ -252,9 +307,10 @@ namespace Microsoft.ML.Probabilistic.Distributions
/// <returns></returns>
public double GetMeanLog()
{
if (IsPointMass) return Math.Log(Point);
if (Power == 0.0) return 0.0;
else if (IsPointMass) return Math.Log(Point);
else if (!IsProper()) throw new ImproperDistributionException(this);
else return Power*(MMath.Digamma(Shape) - Math.Log(Rate));
else return Power * (MMath.Digamma(Shape) - Math.Log(Rate));
}
/// <summary>
@ -263,19 +319,40 @@ namespace Microsoft.ML.Probabilistic.Distributions
/// <returns></returns>
public double GetMeanPower(double power)
{
if (power == 0.0) return 1.0;
else if (IsPointMass) return Math.Pow(Point, power);
//else if (Rate == 0.0) return (power > 0) ? Double.PositiveInfinity : 0.0;
else if (!IsProper()) throw new ImproperDistributionException(this);
return Math.Exp(GetLogMeanPower(power));
}
/// <summary>
/// Computes log(E[x^power])
/// </summary>
/// <returns></returns>
public double GetLogMeanPower(double power)
{
if (power == 0.0 || Power == 0.0) return 0.0;
else if (IsPointMass) return power * Math.Log(Point);
//else if (Rate == 0.0) return (Power * power > 0) ? Double.PositiveInfinity : 0.0;
else if (Shape <= 0 || Rate < 0) throw new ImproperDistributionException(this);
else
{
double power2 = Power*power;
double power2 = Power * power;
if (Shape + power2 <= 0)
{
if (Shape + power2 == 0 && Rate == 0)
{
// Near zero, GammaLn(x) = -log(x)-log(x+1) so if rate=0 we get -(power2+1)*log(0)
// Example: GammaPower.FromShapeAndRate(1,0,-1).GetMeanPower(1) is undefined
if (power2 == -1) throw new ImproperDistributionException(this);
// Example: GammaPower.FromShapeAndRate(0,0,-1).GetMeanPower(1) = 0
else if (power2 < -1) return double.NegativeInfinity;
}
//throw new ArgumentException($"Cannot compute E[x^{power}] since shape ({Shape}) <= {-power2}");
return Double.PositiveInfinity;
}
else return Math.Exp(MMath.GammaLn(Shape + power2) - MMath.GammaLn(Shape) - power2*Math.Log(Rate));
else if (power2 > double.MaxValue) return double.PositiveInfinity;
else
{
return power2 * (MMath.RisingFactorialLnOverN(Shape, power2) - Math.Log(Rate));
}
}
}
@ -287,7 +364,45 @@ namespace Microsoft.ML.Probabilistic.Distributions
{
if (Power == 0.0) return 1.0;
else if (IsPointMass) return Point;
else return Math.Pow((Shape - Power) / Rate, Power);
else if (Shape <= Power && Rate == 0) throw new ImproperDistributionException(this);
else return GetMode(Shape, Rate, Power);
}
/// <summary>
/// Returns x where GetLogProb(x, shape, rate, power) is highest.
/// </summary>
/// <param name="shape">Shape parameter</param>
/// <param name="rate">Rate parameter</param>
/// <param name="power">Power parameter</param>
/// <returns></returns>
public static double GetMode(double shape, double rate, double power)
{
// Compute Math.Pow((shape - power)/rate, power) without overflow.
double shapeMinusPower = Math.Max(0, shape - power);
bool alwaysUseExp = true;
if (alwaysUseExp)
{
double logMode = Math.Log(shapeMinusPower) - Math.Log(rate);
return Math.Exp(logMode * power);
}
else
{
// This approach is more accurate but harder to invert
double x = shapeMinusPower / rate;
// The ratio can overflow if rate < 1 and shapeMinusPower > 1e-30
// The ratio can underflow if rate > 1 and shapeMinusPower < 1
if ((power >= -1 || power < 1) && (x == 0 || double.IsPositiveInfinity(x)))
{
// x may have overflowed or underflowed.
// log(shapeMinusPower) - log(rate) can never overflow or underflow.
double logMode = Math.Log(shapeMinusPower) - Math.Log(rate);
return Math.Exp(logMode * power);
}
else
{
return Math.Pow(x, power);
}
}
}
/// <summary>
@ -377,7 +492,7 @@ namespace Microsoft.ML.Probabilistic.Distributions
/// <returns>True if uniform, false otherwise</returns>
public bool IsUniform()
{
return (Shape == Power) && (Rate == 0);
return !IsPointMass && (Shape == Power) && (Rate == 0);
}
/// <summary>
@ -395,14 +510,131 @@ namespace Microsoft.ML.Probabilistic.Distributions
/// </remarks>
public static double GetLogProb(double x, double shape, double rate, double power)
{
if (x < 0) return double.NegativeInfinity;
if (power == 0.0) return (x == 1.0) ? 0.0 : Double.NegativeInfinity;
double invPower = 1.0/power;
double result = (shape*invPower - 1)*Math.Log(x) - Math.Pow(x, invPower)*rate;
if (IsProper(shape, rate))
if (double.IsPositiveInfinity(x)) // Avoid subtracting infinities below
{
result += shape*Math.Log(rate) - MMath.GammaLn(shape) - Math.Log(Math.Abs(power));
if (power > 0)
{
if (rate > 0) return -x;
else if (rate < 0) return x;
// Fall through when rate == 0
}
else // This case avoids inf/inf below
{
if (shape > power) return -x;
else if (shape < power) return x;
// Fall through when shape == power
}
}
double logRate = Math.Log(rate);
double logx = Math.Log(x);
double logShapeMinusPower = Math.Log(Math.Max(0, shape - power));
double logMode = Math.Log(GetMode(shape, rate, power)); // power * (logShapeMinusPower - logRate);
if (!double.IsNegativeInfinity(logMode))
{
// We compute the density in a way that ensures the maximum is at the mode returned by GetMode.
// mode = ((shape - power)/rate)^power
// The part of the log-density that depends on x is:
// (shape/power-1)*log(x) - rate*x^(1/power)
// = (shape/power-1)*(log(x) - power/(shape-power)*rate*x^(1/power))
// = (shape/power-1)*(log(x) - power*(x/mode)^(1/power))
// = (shape/power-1)*(log(x/mode) - power*(x/mode)^(1/power) + log(mode))
// = (shape/power-1)*(log(x/mode) - power*(x/mode)^(1/power)) + (shape-power)*log((shape-power)/rate)
// = (shape-power)*(log(x/mode)/power - exp(log(x/mode)/power)) + (shape-power)*log((shape-power)/rate)
// = (shape-power)*(log(x/mode)/power + 1 - exp(log(x/mode)/power)) + (shape-power)*(log((shape-power)/rate) - 1)
// = -(shape-power)*(ExpMinus1RatioMinus1RatioMinusHalf(log(x/mode)/power) + 0.5)*(log(x/mode)/power)^2) + (shape-power)*(log((shape-power)/rate) - 1)
double xOverModeLn = logx - logMode;
double xOverModeLnOverPower = xOverModeLn / power;
if (Math.Abs(xOverModeLnOverPower) < 10)
{
double result;
if (double.IsNegativeInfinity(xOverModeLnOverPower)) result = (shape / power - 1) * logx;
else if (double.IsPositiveInfinity(xOverModeLnOverPower) || double.IsPositiveInfinity(MMath.ExpMinus1RatioMinus1RatioMinusHalf(xOverModeLnOverPower))) return (power - shape) * double.PositiveInfinity;
else result = -(shape - power) * (MMath.ExpMinus1RatioMinus1RatioMinusHalf(xOverModeLnOverPower) + 0.5) * xOverModeLnOverPower * xOverModeLnOverPower;
if (IsProper(shape, rate, power))
{
// Remaining terms are:
// shape * Math.Log(rate) - MMath.GammaLn(shape) - Math.Log(Math.Abs(power)) + (shape - power) * (Math.Log((shape - power) / rate) - 1)
if (shape > 1e10)
{
//result += power * (1 + Math.Log(rate)) - Math.Log(Math.Abs(power));
// In double precision, we can assume GammaLn(x) = (x-0.5)*log(x) - x for x > 1e10
//result += (shape - power) * Math.Log(1 - power / shape) + (0.5 - power) * Math.Log(shape);
result += shape * Math.Log(1 - power / shape) + power * (1 + logRate - logShapeMinusPower) + 0.5 * Math.Log(shape) - Math.Log(Math.Abs(power));
}
else
{
//result += (shape - power) * Math.Log(shape - power) - shape - MMath.GammaLn(shape);
result += (shape - power) * (logShapeMinusPower - logRate - 1);
result += shape * logRate - MMath.GammaLn(shape) - Math.Log(Math.Abs(power));
}
}
else
{
result += (shape - power) * (logShapeMinusPower - logRate - 1);
}
return result;
}
// Fall through
}
{
double result = 0;
double logxOverPower = logx / power;
if (logx == 0) // avoid inf * 0
{
result -= rate;
}
else if (Math.Abs(logxOverPower) < 1e-8)
{
// The part of the log-density that depends on x is:
// (shape/power-1)*log(x) - rate*x^(1/power)
// = (shape/power-1)*log(x) - rate*exp(log(x)/power))
// (when abs(log(x)/power) < 1e-8, exp(log(x)/power) = 1 + log(x)/power + 0.5*log(x)^2/power^2)
// = (shape/power-1)*log(x) - rate*(1 + log(x)/power + 0.5*log(x)^2/power^2)
// = ((shape - rate)/power - 1)*log(x) - rate*(1 + 0.5*log(x)^2/power^2)
result += ((shape - rate) / power - 1) * logx - rate * (1 + 0.5 * logxOverPower * logxOverPower);
}
else
{
if (shape != power && x != 1) result += (shape / power - 1) * logx;
if (rate != 0)
{
double xInvPowerRate = -Math.Exp(logxOverPower + logRate);
if (double.IsInfinity(xInvPowerRate) && x != 0 && !double.IsPositiveInfinity(x))
{
// recompute another way to avoid overflow
double logRatePower = logRate * power;
if (!double.IsInfinity(logRatePower))
xInvPowerRate = -Math.Exp((logx + logRatePower) / power);
}
if (double.IsInfinity(xInvPowerRate)) return xInvPowerRate;
result += xInvPowerRate;
}
}
if (IsProper(shape, rate, power))
{
double minusLogNormalizer;
if (shape > 1e10)
{
double logShape = Math.Log(shape);
// In double precision, we can assume GammaLn(x) = (x-0.5)*log(x) - x for x > 1e10
minusLogNormalizer = shape * (logRate - logShape + 1) + 0.5 * logShape - Math.Log(Math.Abs(power));
if (double.IsNegativeInfinity(minusLogNormalizer) && double.IsPositiveInfinity(result))
{
// Recompute a different way to avoid subtracting infinities
result = -logx - rate * Math.Exp(logxOverPower) + shape * (logx / power + logRate - logShape + 1) + 0.5 * logShape - Math.Log(Math.Abs(power));
}
else result += minusLogNormalizer;
}
else
{
result += shape * logRate - MMath.GammaLn(shape) - Math.Log(Math.Abs(power));
}
}
return result;
}
return result;
}
/// <summary>
@ -412,7 +644,11 @@ namespace Microsoft.ML.Probabilistic.Distributions
/// <returns>log(Gamma(x;shape,rate,power))</returns>
public double GetLogProb(double x)
{
if (IsPointMass)
if (Power == 0)
{
return (x == 1) ? 0.0 : double.NegativeInfinity;
}
else if (IsPointMass)
{
return (x == Point) ? 0.0 : Double.NegativeInfinity;
}
@ -434,7 +670,7 @@ namespace Microsoft.ML.Probabilistic.Distributions
{
if (IsProper())
{
return MMath.GammaLn(Shape) - Shape*Math.Log(Rate) + Math.Log(Math.Abs(Power));
return MMath.GammaLn(Shape) - Shape * Math.Log(Rate) + Math.Log(Math.Abs(Power));
}
else
{
@ -462,33 +698,34 @@ namespace Microsoft.ML.Probabilistic.Distributions
else
{
// that is not a point mass.
double invPower = 1.0/that.Power;
double result = (that.Shape*invPower - 1)*GetMeanLog() - GetMeanPower(invPower)*that.Rate;
double invPower = 1.0 / that.Power;
double result = (that.Shape * invPower - 1) * GetMeanLog() - GetMeanPower(invPower) * that.Rate;
result -= that.GetLogNormalizer();
return result;
}
}
/// <summary>
/// Asks whether this Gamma instance is proper or not. A Gamma distribution
/// is proper only if Shape > 0 and Rate > 0.
/// Asks whether this GammaPower instance is proper or not. A GammaPower distribution
/// is proper only if Shape > 0 and 0 &lt; Rate &lt; infinity and Power is not infinite.
/// </summary>
/// <returns>True if proper, false otherwise</returns>
public bool IsProper()
{
return (Shape > 0) && (Rate > 0);
return (Shape > 0) && (Rate > 0) && !double.IsPositiveInfinity(Rate) && !double.IsInfinity(Power);
}
/// <summary>
/// Asks whether a Gamma distribution is proper or not. A Gamma distribution
/// is proper only if Shape > 0 and Rate > 0.
/// Asks whether a GammaPower distribution is proper or not. A GammaPower distribution
/// is proper only if Shape > 0 and 0 &lt; Rate &lt; infinity and Power is not infinite.
/// </summary>
/// <param name="shape">shape parameter for the Gamma</param>
/// <param name="rate">rate parameter for the Gamma</param>
/// <param name="shape">shape parameter</param>
/// <param name="rate">rate parameter</param>
/// <param name="power">power parameter</param>
/// <returns>True if proper, false otherwise</returns>
public static bool IsProper(double shape, double rate)
public static bool IsProper(double shape, double rate, double power)
{
return (shape > 0) && (rate > 0);
return (shape > 0) && (rate > 0) && !double.IsPositiveInfinity(rate) && !double.IsInfinity(power);
}
/// <summary>
@ -508,7 +745,7 @@ namespace Microsoft.ML.Probabilistic.Distributions
}
else
{
GammaPower product = this*that;
GammaPower product = this * that;
//if (!product.IsProper()) throw new ArgumentException("The product is improper");
return product.GetLogNormalizer() - this.GetLogNormalizer() - that.GetLogNormalizer();
}
@ -524,7 +761,7 @@ namespace Microsoft.ML.Probabilistic.Distributions
{
if (IsPointMass)
{
return power*that.GetLogProb(Point);
return power * that.GetLogProb(Point);
}
else if (that.IsPointMass)
{
@ -533,8 +770,8 @@ namespace Microsoft.ML.Probabilistic.Distributions
}
else
{
var product = this*(that ^ power);
return product.GetLogNormalizer() - this.GetLogNormalizer() - power*that.GetLogNormalizer();
var product = this * (that ^ power);
return product.GetLogNormalizer() - this.GetLogNormalizer() - power * that.GetLogNormalizer();
}
}
@ -551,7 +788,7 @@ namespace Microsoft.ML.Probabilistic.Distributions
}
else
{
return Math.Pow(Rand.Gamma(Shape)/Rate, Power);
return Math.Pow(Rand.Gamma(Shape) / Rate, Power);
}
}
@ -576,7 +813,7 @@ namespace Microsoft.ML.Probabilistic.Distributions
[Stochastic]
public static double Sample(double shape, double scale, double power)
{
return Math.Pow(Rand.Gamma(shape)*scale, power);
return Math.Pow(Rand.Gamma(shape) * scale, power);
}
/// <summary>
@ -637,6 +874,7 @@ namespace Microsoft.ML.Probabilistic.Distributions
{
Shape = a.Shape + b.Shape - a.Power;
Rate = a.Rate + b.Rate;
CheckForPointMass();
}
}
@ -691,11 +929,11 @@ namespace Microsoft.ML.Probabilistic.Distributions
else if (forceProper && (numerator.Shape < denominator.Shape || numerator.Rate < denominator.Rate))
{
// constraints: shape >= power, rate >= 0, ((shape-power)+denominator.shape)/(rate + denominator.rate) = numerator.shape/numerator.rate
double m = numerator.Shape/numerator.Rate;
double shape = m*denominator.Rate - denominator.Shape + Power;
double m = numerator.Shape / numerator.Rate;
double shape = m * denominator.Rate - denominator.Shape + Power;
if (shape < Power)
{
Rate = denominator.Shape/m - denominator.Rate;
Rate = denominator.Shape / m - denominator.Rate;
Shape = Power;
}
else
@ -709,6 +947,7 @@ namespace Microsoft.ML.Probabilistic.Distributions
Shape = numerator.Shape - denominator.Shape + Power;
Rate = numerator.Rate - denominator.Rate;
}
CheckForPointMass();
}
/// <summary>
@ -758,12 +997,12 @@ namespace Microsoft.ML.Probabilistic.Distributions
{
Point = dist.Point;
}
return;
}
else
{
Shape = (dist.Shape - dist.Power)*exponent + dist.Power;
Rate = dist.Rate*exponent;
Shape = (dist.Shape - dist.Power) * exponent + dist.Power;
Rate = dist.Rate * exponent;
CheckForPointMass();
}
}
@ -802,7 +1041,7 @@ namespace Microsoft.ML.Probabilistic.Distributions
public double MaxDiff(object thatd)
{
if (!(thatd is GammaPower)) return Double.PositiveInfinity;
GammaPower that = (GammaPower) thatd;
GammaPower that = (GammaPower)thatd;
double diff1 = MMath.AbsDiff(Rate, that.Rate);
double diff2 = MMath.AbsDiff(Shape, that.Shape);
double diff3 = MMath.AbsDiff(Power, that.Power);
@ -868,8 +1107,9 @@ namespace Microsoft.ML.Probabilistic.Distributions
public GammaPower(double shape, double scale, double power)
{
Shape = shape;
Rate = 1.0/scale;
Rate = 1.0 / scale;
Power = power;
CheckForPointMass();
}
/// <summary>
@ -915,6 +1155,7 @@ namespace Microsoft.ML.Probabilistic.Distributions
/// <returns>A new point mass Gamma distribution</returns>
public static GammaPower PointMass(double mean, double power)
{
if (power == 0 && mean != 1) throw new ArgumentOutOfRangeException(nameof(mean), mean, $"mean = {mean} is incompatible with power = 0");
GammaPower result = new GammaPower();
result.Power = power;
result.Point = mean;
@ -938,7 +1179,7 @@ namespace Microsoft.ML.Probabilistic.Distributions
}
else
{
double scale = 1.0/Rate;
double scale = 1.0 / Rate;
string s = "GammaPower(" + Shape.ToString("g4") + ", " + scale.ToString("g4") + ", " + Power.ToString("g4") + ")";
if (IsProper())
{

Просмотреть файл

@ -0,0 +1,120 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using System;
using Microsoft.ML.Probabilistic.Factors;
using Microsoft.ML.Probabilistic.Math;
namespace Microsoft.ML.Probabilistic.Distributions
{
/// <summary>
/// Estimates a GammaPower distribution from samples.
/// </summary>
/// <remarks><code>
/// The distribution is estimated via moment matching (not maximum-likelihood).
/// In the one-dimensional case,
/// E[x] = (a+1)/b
/// var(x) = (a+1)/b^2
/// b = E[x]/var(x)
/// a = E[x]^2/var(x) - 1
/// </code></remarks>
public class GammaPowerEstimator : Estimator<GammaPower>, Accumulator<GammaPower>, Accumulator<double>,
SettableTo<GammaPowerEstimator>, ICloneable
{
/// <summary>
/// Inner estimator
/// </summary>
private GammaEstimator gammaEstimator = new GammaEstimator();
/// <summary>
/// Desired power
/// </summary>
public readonly double Power;
/// <summary>
/// Creates a new GammaPower estimator
/// </summary>
public GammaPowerEstimator(double power)
{
this.Power = power;
}
/// <summary>
/// Retrieves the estimated GammaPower
/// </summary>
/// <param name="result">Where to put the result</param>
/// <returns>The resulting distribution</returns>
public GammaPower GetDistribution(GammaPower result)
{
return GammaPower.FromGamma(gammaEstimator.GetDistribution(new Gamma()), Power);
}
/// <summary>
/// Adds a GammaPower distribution item to the estimator
/// </summary>
/// <param name="distribution">The distribution instance to add</param>
public void Add(GammaPower distribution)
{
Add(distribution, 1.0);
}
/// <summary>
/// Adds a GammaPower distribution item to the estimator
/// </summary>
/// <param name="distribution">The distribution instance to add</param>
/// <param name="weight">The weight of the sample</param>
public void Add(GammaPower distribution, double weight)
{
gammaEstimator.Add(Gamma.FromShapeAndRate(distribution.Shape, distribution.Rate), weight);
}
/// <summary>
/// Adds a sample to the estimator
/// </summary>
/// <param name="value">The sample to add</param>
public void Add(double value)
{
gammaEstimator.Add(System.Math.Pow(value, 1/Power));
}
/// <summary>
/// Adds a sample with a given weight to the estimator
/// </summary>
/// <param name="value">The sample to add</param>
/// <param name="weight">The weight of the sample</param>
public void Add(double value, double weight)
{
gammaEstimator.Add(System.Math.Pow(value, 1/Power), weight);
}
/// <summary>
/// Clears the estimator
/// </summary>
public void Clear()
{
gammaEstimator.Clear();
}
/// <summary>
/// Sets the state of this estimator from the supplied estimator.
/// </summary>
/// <param name="value"></param>
public void SetTo(GammaPowerEstimator value)
{
if (this.Power != value.Power) throw new ArgumentException($"Incompatible powers: this.Power={Power}, value.Power={value.Power}", nameof(value));
gammaEstimator.SetTo(value.gammaEstimator);
}
/// <summary>
/// Returns a clone of this estimator.
/// </summary>
/// <returns></returns>
public object Clone()
{
GammaPowerEstimator result = new GammaPowerEstimator(Power);
result.SetTo(this);
return result;
}
}
}

Просмотреть файл

@ -259,7 +259,7 @@ namespace Microsoft.ML.Probabilistic.Distributions
return Math.Max(quantiles[n-1], upperGaussian.GetQuantile(probability));
}
if (n == 1) return quantiles[0]; // probability must be 0.5
double pos = MMath.LargestDoubleProduct(n + 1, probability) - 1;
double pos = MMath.LargestDoubleProduct(probability, n + 1) - 1;
int lower = (int)Math.Floor(pos);
if (lower == n - 1) return quantiles[lower];
return OuterQuantiles.GetQuantile(probability, lower + 1, quantiles[lower], quantiles[lower + 1], n + 2);

Просмотреть файл

@ -98,7 +98,7 @@ namespace Microsoft.ML.Probabilistic.Distributions
double slope = (quantiles[largerIndex] - quantiles[smallerIndex]) / (probabilities[largerIndex] - probabilities[smallerIndex]);
// Solve for the largest x such that probabilities[smallerIndex] + (x - quantiles[smallerIndex]) / slope <= probability.
double frac = MMath.LargestDoubleSum(-probabilities[smallerIndex], probability);
double offset = MMath.LargestDoubleProduct(slope, frac);
double offset = MMath.LargestDoubleProduct(frac, slope);
return MMath.LargestDoubleSum(quantiles[smallerIndex], offset);
}
}

Просмотреть файл

@ -151,7 +151,7 @@ namespace Microsoft.ML.Probabilistic.Distributions
return MMath.NextDouble(quantiles[n - 1]);
}
if (n == 1) return quantiles[0];
double pos = MMath.LargestDoubleProduct(n - 1, probability);
double pos = MMath.LargestDoubleProduct(probability, n - 1);
int lower = (int)Math.Floor(pos);
if (lower == n - 1) return quantiles[lower];
return GetQuantile(probability, lower, quantiles[lower], quantiles[lower + 1], n);
@ -171,13 +171,13 @@ namespace Microsoft.ML.Probabilistic.Distributions
if(probability < 0) throw new ArgumentOutOfRangeException(nameof(probability), probability, $"{nameof(probability)} < 0");
if (probability > 1) throw new ArgumentOutOfRangeException(nameof(probability), probability, $"{nameof(probability)} > 1");
if (n <= 1) throw new ArgumentOutOfRangeException(nameof(n), n, "n <= 1");
double pos = MMath.LargestDoubleProduct(n - 1, probability);
double pos = MMath.LargestDoubleProduct(probability, n - 1);
double frac = MMath.LargestDoubleSum(-lowerIndex, pos);
if (upperItem < lowerItem) throw new ArgumentOutOfRangeException(nameof(upperItem), upperItem, $"{nameof(upperItem)} ({upperItem}) < {nameof(lowerItem)} ({lowerItem})");
if (upperItem == lowerItem) return lowerItem;
// The above check ensures diff > 0
double diff = upperItem - lowerItem;
double offset = MMath.LargestDoubleProduct(diff, frac);
double offset = MMath.LargestDoubleProduct(frac, diff);
return MMath.LargestDoubleSum(lowerItem, offset);
}
}

Просмотреть файл

@ -184,7 +184,7 @@ namespace Microsoft.ML.Probabilistic.Distributions
else if (InterpolationType == 1)
{
// Find frac such that (lowerRank - 0.5 + frac) / itemCount == probability
double scaledProbability = MMath.LargestDoubleProduct(itemCount, probability);
double scaledProbability = MMath.LargestDoubleProduct(probability, itemCount);
if (scaledProbability < 0.5) return lowerBound;
if (scaledProbability >= itemCount - 0.5) return upperBound;
// probability of lowerItem ranges from (lowerRank-lowerWeight+0.5) / itemCount
@ -213,7 +213,7 @@ namespace Microsoft.ML.Probabilistic.Distributions
}
else
{
double scaledProbability = MMath.LargestDoubleProduct(itemCount - 1, probability);
double scaledProbability = MMath.LargestDoubleProduct(probability, itemCount - 1);
// probability of lowerItem ranges from (lowerRank-lowerWeight) / (itemCount - 1)
// to (lowerRank - 1) / (itemCount - 1).
if (scaledProbability == lowerRank - lowerWeight) return lowerItem;

Просмотреть файл

@ -499,14 +499,14 @@ namespace Microsoft.ML.Probabilistic.Distributions
else
{
double Z = GetNormalizer();
if(Z == 0)
if (Z == 0)
{
double mean = this.Gamma.GetMean();
return Math.Min(UpperBound, Math.Max(LowerBound, mean));
}
// if Z is not zero, then Z1 cannot be zero.
double Z1 = MMath.GammaLower(this.Gamma.Shape + 1, this.Gamma.Rate * UpperBound) - MMath.GammaLower(this.Gamma.Shape + 1, this.Gamma.Rate * LowerBound);
double sum = this.Gamma.Shape/this.Gamma.Rate * Z1;
double sum = this.Gamma.Shape / this.Gamma.Rate * Z1;
return sum / Z;
}
}
@ -536,16 +536,18 @@ namespace Microsoft.ML.Probabilistic.Distributions
else
{
double Z = GetNormalizer();
if(Z == 0)
if (Z == 0)
{
mean = Math.Min(UpperBound, Math.Max(LowerBound, this.Gamma.GetMean()));
variance = 0.0;
return;
}
double m = this.Gamma.Shape / this.Gamma.Rate;
double sum = m * (MMath.GammaLower(this.Gamma.Shape + 1, this.Gamma.Rate * UpperBound) - MMath.GammaLower(this.Gamma.Shape + 1, this.Gamma.Rate * LowerBound));
mean = sum / Z;
double sum2 = m * (this.Gamma.Shape+1)/this.Gamma.Rate * (MMath.GammaLower(this.Gamma.Shape + 2, this.Gamma.Rate * UpperBound) - MMath.GammaLower(this.Gamma.Shape + 2, this.Gamma.Rate * LowerBound));
// t = x * Rate
// dt = dx * Rate
double Z1 = MMath.GammaLower(this.Gamma.Shape + 1, this.Gamma.Rate * UpperBound) - MMath.GammaLower(this.Gamma.Shape + 1, this.Gamma.Rate * LowerBound);
mean = m * Z1 / Z;
double sum2 = m * (this.Gamma.Shape + 1) / this.Gamma.Rate * (MMath.GammaLower(this.Gamma.Shape + 2, this.Gamma.Rate * UpperBound) - MMath.GammaLower(this.Gamma.Shape + 2, this.Gamma.Rate * LowerBound));
variance = sum2 / Z - mean * mean;
}
}
@ -561,6 +563,40 @@ namespace Microsoft.ML.Probabilistic.Distributions
return var;
}
/// <summary>
/// Computes E[x^power]
/// </summary>
/// <returns></returns>
public double GetMeanPower(double power)
{
if (power == 0.0) return 1.0;
else if (IsPointMass) return Math.Pow(Point, power);
//else if (Rate == 0.0) return (power > 0) ? Double.PositiveInfinity : 0.0;
else if (!IsProper()) throw new ImproperDistributionException(this);
else if (this.Gamma.Shape <= -power && LowerBound == 0)
{
throw new ArgumentException("Cannot compute E[x^" + power + "] for " + this + " (shape <= " + (-power) + ")");
}
else
{
double Z = GetNormalizer();
double shapePlusPower = this.Gamma.Shape + power;
double Z1;
bool regularized = shapePlusPower >= 1;
if (regularized)
{
Z1 = Math.Exp(MMath.GammaLn(shapePlusPower) - MMath.GammaLn(this.Gamma.Shape)) *
(MMath.GammaLower(shapePlusPower, this.Gamma.Rate * UpperBound) - MMath.GammaLower(shapePlusPower, this.Gamma.Rate * LowerBound));
}
else
{
Z1 = Math.Exp(- MMath.GammaLn(this.Gamma.Shape)) *
(MMath.GammaUpper(shapePlusPower, this.Gamma.Rate * LowerBound, regularized) - MMath.GammaUpper(shapePlusPower, this.Gamma.Rate * UpperBound, regularized));
}
return Math.Pow(this.Gamma.Rate, -power) * Z1 / Z;
}
}
/// <summary>
/// Get the logarithm of the average value of that distribution under this distribution, i.e. log(int this(x) that(x) dx)
/// </summary>

Просмотреть файл

@ -0,0 +1,459 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
namespace Microsoft.ML.Probabilistic.Factors
{
using System;
using System.Diagnostics;
using Microsoft.ML.Probabilistic.Distributions;
using Microsoft.ML.Probabilistic.Math;
using Microsoft.ML.Probabilistic.Factors.Attributes;
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp_Laplace"]/doc/*'/>
[FactorMethod(typeof(Factor), "Product", typeof(double), typeof(double))]
[Buffers("Q")]
[Quality(QualityBand.Experimental)]
public static class GammaPowerProductOp_Laplace
{
// derivatives of the factor marginalized over Product and A
internal static double[] dlogfs(double b, GammaPower product, GammaPower A)
{
if (A.Power != product.Power) throw new NotSupportedException($"A.Power ({A.Power}) != product.Power ({product.Power})");
if (product.IsPointMass)
{
double productPointPower = Math.Pow(product.Point, 1 / A.Power);
if (productPointPower > double.MaxValue) return new double[] { 0, 0, 0, 0 };
// int delta(a^pa * b^pb - y) Ga(a; s, r) da
// = int delta(a' - y) Ga(a'^(1/pa)/b^(pb/pa); s, r) a'^(1/pa-1)/b^(pb/pa)/pa da'
// = Ga(y^(1/pa)/b^(pb/pa); s, r) y^(1/pa-1)/b^(pb/pa)/pa
// logf = -s*pb/pa*log(b) - r*y^(1/pa)/b^(pb/pa)
double ib = 1 / b;
double ib2 = ib * ib;
double s = A.Shape;
double c = A.Rate * productPointPower / b;
double dlogf = -s * ib + c * ib;
double ddlogf = s * ib2 - 2 * c * ib2;
double dddlogf = -2 * s * ib * ib2 + 6 * c * ib2 * ib;
double d4logf = 6 * s * ib2 * ib2 - 24 * c * ib2 * ib2;
return new double[] { dlogf, ddlogf, dddlogf, d4logf };
}
else if (A.IsPointMass)
{
// (a * b^pb)^(y_s/py - 1) exp(-y_r*(a*b^pb)^(1/py))
// logf = (y_s/py-1)*pb*log(b) - y_r*a^(1/py)*b^(pb/py)
double ib = 1 / b;
double ib2 = ib * ib;
double s = product.Shape - product.Power;
double c = product.Rate * Math.Pow(A.Point, 1 / product.Power);
double dlogf = s * ib - c;
double ddlogf = -s * ib2;
double dddlogf = 2 * s * ib * ib2;
double d4logf = -6 * s * ib2 * ib2;
return new double[] { dlogf, ddlogf, dddlogf, d4logf };
}
else
{
// int (a^pa * b^pb)^(y_s/y_p - 1) exp(-y_r*(a^pa*b^pb)^(1/y_p)) Ga(a; s, r) da
// = int a^(pa*(y_s/y_p-1) + s-1) b^(pb*(y_s/y_p-1)) exp(-y_r a^(pa/y_p) b^(pb/y_p) -r*a) da
// where pa = pb = y_p:
// = int a^(y_s-pa + s-1) b^(y_s-y_p) exp(-(y_r b + r)*a) da
// = b^(y_s-y_p) / (r + b y_r)^(y_s-pa + s)
// logf = (y_s-y_p)*log(b) - (s+y_s-pa)*log(r + b*y_r)
double b2 = b * b;
double s = product.Shape - product.Power;
double c = GammaFromShapeAndRateOp_Slow.AddShapesMinus1(product.Shape, A.Shape) + (1 - A.Power);
double denom = A.Rate / product.Rate + b;
if (product.Rate == 0)
{
c = 0;
denom = double.PositiveInfinity;
}
if (b < 1e-77)
{
double bOverDenom = b / denom;
double bOverDenom2 = bOverDenom * bOverDenom;
double dlogf = (s - c * bOverDenom) / b;
double ddlogf = (-s + c * bOverDenom2) / b2;
double dddlogf = (2 * s - 2 * c * bOverDenom * bOverDenom2) / (b * b2);
double d4logf = (-6 * s + 6 * c * bOverDenom2 * bOverDenom2) / (b2 * b2);
return new double[] { dlogf, ddlogf, dddlogf, d4logf };
}
else
{
double denom2 = denom * denom;
double dlogf = s / b - c / denom;
double ddlogf = -s / b2 + c / denom2;
double dddlogf = 2 * s / (b * b2) - 2 * c / (denom * denom2);
double d4logf = -6 * s / (b2 * b2) + 6 * c / (denom2 * denom2);
return new double[] { dlogf, ddlogf, dddlogf, d4logf };
}
}
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaPowerProductOp_Laplace"]/message_doc[@name="QInit()"]/*'/>
[Skip]
public static Gamma QInit()
{
return Gamma.Uniform();
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaPowerProductOp_Laplace"]/message_doc[@name="Q(GammaPower, GammaPower, GammaPower)"]/*'/>
[Fresh]
public static Gamma Q(GammaPower product, [Proper] GammaPower A, [Proper] GammaPower B)
{
// ensure B has the larger shape
if (B.Shape < A.Shape) return Q(product, B, A);
if (B.IsPointMass)
return Gamma.PointMass(B.Point);
if (A.IsPointMass)
return Gamma.FromShapeAndRate(B.Shape, B.Rate);
if (A.Power != product.Power) throw new NotSupportedException($"A.Power ({A.Power}) != product.Power ({product.Power})");
if (B.Power != product.Power) throw new NotSupportedException($"B.Power ({B.Power}) != product.Power ({product.Power})");
double x;
if (product.IsPointMass)
{
if (product.Point == 0) return Gamma.PointMass(0);
double productPointPower = Math.Pow(product.Point, 1 / A.Power);
// y = product^(1/power)
// logf = -a_s*log(b) - y*a_r/b
// logp = b_s*log(b) - b_r*b
// dlogfp = (b_s-a_s)/b - b_r + y*a_r/b^2 = 0
// -b_r b^2 + (b_s-a_s) b + y*a_r = 0
double shape = B.Shape - A.Shape;
x = (Math.Sqrt(shape * shape + 4 * B.Rate * A.Rate * productPointPower) + shape) / 2 / B.Rate;
}
else
{
double shape1 = GammaFromShapeAndRateOp_Slow.AddShapesMinus1(B.Shape, product.Shape) + (1 - product.Power);
if (product.Rate == 0)
{
x = GammaFromShapeAndRateOp_Slow.FindMaximum(shape1, 0, A.Rate, B.Rate);
}
else
{
double shape2 = GammaFromShapeAndRateOp_Slow.AddShapesMinus1(A.Shape, product.Shape) + (1 - A.Power);
// find the maximum of the factor marginalized over Product and A, times B
// From above:
// logf = (y_s/y_p-1)*pb*log(b) - (s+y_s-pa)*log(r + b^(pb/y_p)*y_r)
x = GammaFromShapeAndRateOp_Slow.FindMaximum(shape1, shape2, A.Rate / product.Rate, B.Rate);
}
if (x == 0)
x = 1e-100;
}
double[] dlogfss = dlogfs(x, product, A);
double dlogf = dlogfss[0];
double ddlogf = dlogfss[1];
return GammaFromShapeAndRateOp_Laplace.GammaFromDerivatives(Gamma.FromShapeAndRate(B.Shape, B.Rate), x, dlogf, ddlogf);
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaPowerProductOp_Laplace"]/message_doc[@name="LogAverageFactor(GammaPower, GammaPower, GammaPower, Gamma)"]/*'/>
public static double LogAverageFactor(GammaPower product, GammaPower A, GammaPower B, Gamma q)
{
if (B.Shape < A.Shape) return LogAverageFactor(product, B, A, q);
if (B.IsPointMass)
return GammaProductOp.LogAverageFactor(product, A, B.Point);
if (A.IsPointMass)
return GammaProductOp.LogAverageFactor(product, A.Point, B);
double qPoint = q.GetMean();
double logf;
if (product.IsPointMass)
{
// Ga(y/q; s, r)/q
if (qPoint == 0)
{
if (product.Point == 0)
logf = A.GetLogProb(0);
else
logf = double.NegativeInfinity;
}
else logf = A.GetLogProb(product.Point / qPoint) - Math.Log(qPoint);
}
else
{
// int Ga^y_p(a^pa b^pb; y_s, y_r) Ga(a; s, r) da = q^(y_s-y_p) / (r + q y_r)^(y_s + s-pa) Gamma(y_s+s-pa)
double shape = product.Shape - product.Power;
double shape2 = GammaFromShapeAndRateOp_Slow.AddShapesMinus1(product.Shape, A.Shape) + (1 - A.Power);
if (IsProper(product) && product.Shape > A.Shape)
{
// same as below but product.GetLogNormalizer() is inlined and combined with other terms
double AShapeMinusPower = A.Shape - A.Power;
logf = shape * Math.Log(qPoint)
- Gamma.FromShapeAndRate(A.Shape, A.Rate).GetLogNormalizer()
- product.Shape * Math.Log(A.Rate / product.Rate + qPoint)
- Math.Log(Math.Abs(product.Power));
if (AShapeMinusPower != 0)
logf += AShapeMinusPower * (MMath.RisingFactorialLnOverN(product.Shape, AShapeMinusPower) - Math.Log(A.Rate + qPoint * product.Rate));
}
else
{
logf = shape * Math.Log(qPoint)
- shape2 * Math.Log(A.Rate + qPoint * product.Rate)
+ MMath.GammaLn(shape2)
- Gamma.FromShapeAndRate(A.Shape, A.Rate).GetLogNormalizer()
- product.GetLogNormalizer();
// normalizer is -MMath.GammaLn(Shape) + Shape * Math.Log(Rate) - Math.Log(Math.Abs(Power))
}
}
double logz = logf + Gamma.FromShapeAndRate(B.Shape, B.Rate).GetLogProb(qPoint) - q.GetLogProb(qPoint);
return logz;
}
private static bool IsProper(GammaPower gammaPower)
{
return (gammaPower.Shape > 0) && (gammaPower.Rate > 0) && !double.IsInfinity(gammaPower.Power);
}
private static double LogAverageFactor_slow(GammaPower product, GammaPower A, [Proper] GammaPower B)
{
return LogAverageFactor(product, A, B, Q(product, A, B));
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaPowerProductOp_Laplace"]/message_doc[@name="LogEvidenceRatio(GammaPower, GammaPower, GammaPower, GammaPower, Gamma)"]/*'/>
public static double LogEvidenceRatio([SkipIfUniform] GammaPower product, GammaPower A, GammaPower B, GammaPower to_product, Gamma q)
{
//if (double.IsPositiveInfinity(product.Rate)) return double.NegativeInfinity;
return LogAverageFactor(product, A, B, q) - to_product.GetLogAverageOf(product);
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaPowerProductOp_Laplace"]/message_doc[@name="LogEvidenceRatio(double, GammaPower, GammaPower, Gamma)"]/*'/>
public static double LogEvidenceRatio(double product, GammaPower A, GammaPower B, Gamma q)
{
return LogAverageFactor(GammaPower.PointMass(product, 1), A, B, q);
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaPowerProductOp_Laplace"]/message_doc[@name="BAverageConditional(GammaPower, GammaPower, GammaPower, Gamma, GammaPower)"]/*'/>
public static GammaPower BAverageConditional([SkipIfUniform] GammaPower product, [Proper] GammaPower A, [Proper] GammaPower B, Gamma q, GammaPower result)
{
if (B.Shape < A.Shape) return AAverageConditional(product, B, A, q, result);
if (A.IsPointMass)
return GammaProductOp.BAverageConditional(product, A.Point, result);
if (B.IsPointMass)
return GammaPower.Uniform(result.Power); // TODO
if (product.IsUniform()) return product;
if (q.IsUniform())
q = Q(product, A, B);
double bPoint = q.GetMean();
// derivatives of b
double[] bDerivatives = new double[] { bPoint, 1, 0, 0 };
double bMean, bVariance;
GaussianOp_Laplace.LaplaceMoments(q, bDerivatives, dlogfs(bPoint, product, A), out bMean, out bVariance);
GammaPower bMarginal = GammaPower.FromGamma(Gamma.FromMeanAndVariance(bMean, bVariance), result.Power);
result.SetToRatio(bMarginal, B, GammaProductOp_Laplace.ForceProper);
return result;
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaPowerProductOp_Laplace"]/message_doc[@name="ProductAverageConditional(GammaPower, GammaPower, GammaPower, Gamma, GammaPower)"]/*'/>
public static GammaPower ProductAverageConditional(GammaPower product, [Proper] GammaPower A, [SkipIfUniform] GammaPower B, Gamma q, GammaPower result)
{
if (B.Shape < A.Shape) return ProductAverageConditional(product, B, A, q, result);
if (B.IsPointMass)
return GammaProductOp.ProductAverageConditional(A, B.Point);
if (B.IsUniform())
return GammaPower.Uniform(result.Power);
if (A.IsPointMass)
return GammaProductOp.ProductAverageConditional(A.Point, B);
if (product.IsPointMass)
return GammaPower.Uniform(result.Power); // TODO
if (product.Rate == 0 && A.Rate == 0)
return GammaPower.Uniform(result.Power);
if (A.Power != product.Power) throw new NotSupportedException($"A.Power ({A.Power}) != product.Power ({product.Power})");
if (B.Power != product.Power) throw new NotSupportedException($"B.Power ({B.Power}) != product.Power ({product.Power})");
if (A.Rate == 0)
{
if (B.Rate == 0) return GammaPower.FromShapeAndRate(Math.Min(A.Shape, B.Shape), 0, result.Power);
else return A;
}
if (B.Rate == 0) return B;
double qPoint = q.GetMean();
double r = product.Rate;
double shape2 = GammaFromShapeAndRateOp_Slow.AddShapesMinus1(product.Shape, A.Shape) + (1 - A.Power);
GammaPower productMarginal;
if (shape2 > 2 && result.Power < 0)
{
// Compute the moments of product^(-1/product.Power)
// Here q = b^(1/b.Power)
// E[a^(-1/a.Power) b^(-1/b.Power)] = E[(q r + a_r)/(shape2-1)/q]
// var(a^(-1/a.Power) b^(-1/b.Power)) = E[(q r + a_r)^2/(shape2-1)/(shape2-2)/q^2] - E[a^(-1/a.Power) b^(-1/b.Power)]^2
// = (var((q r + a_r)/q) + E[(q r + a_r)/q]^2)/(shape2-1)/(shape2-2) - E[(q r + a_r)/q]^2/(shape2-1)^2
// = var((q r + a_r)/q)/(shape2-1)/(shape2-2) + E[(q r + a_r)/(shape2-1)/q]^2/(shape2-2)
double iq = 1 / qPoint;
double iq2 = iq * iq;
double[] iqDerivatives = new double[] { iq, -iq2, 2 * iq2 * iq, -6 * iq2 * iq2 };
double iqMean, iqVariance;
GaussianOp_Laplace.LaplaceMoments(q, iqDerivatives, dlogfs(qPoint, product, A), out iqMean, out iqVariance);
double ipMean = (r + A.Rate * iqMean) / (shape2 - 1);
double ipVariance = (iqVariance * A.Rate * A.Rate / (shape2 - 1) + ipMean * ipMean) / (shape2 - 2);
GammaPower ipMarginal = GammaPower.FromMeanAndVariance(ipMean, ipVariance, -1);
if (ipMarginal.IsUniform())
{
return GammaPower.Uniform(result.Power);
}
else
productMarginal = GammaPower.FromShapeAndRate(ipMarginal.Shape, ipMarginal.Rate, result.Power);
bool check = false;
if (check)
{
// Importance sampling
MeanVarianceAccumulator mvaQ = new MeanVarianceAccumulator();
MeanVarianceAccumulator mvaInvProduct = new MeanVarianceAccumulator();
Gamma bPrior = Gamma.FromShapeAndRate(B.Shape, B.Rate);
//q = bPrior;
double shift = (product.Shape - product.Power) * Math.Log(qPoint) - shape2 * Math.Log(A.Rate + qPoint * r) + bPrior.GetLogProb(qPoint) - q.GetLogProb(qPoint);
for (int i = 0; i < 1000000; i++)
{
double qSample = q.Sample();
// logf = (y_s-y_p)*log(b) - (s+y_s-pa)*log(r + b*y_r)
double logf = (product.Shape - product.Power) * Math.Log(qSample) - shape2 * Math.Log(A.Rate + qSample * r) + bPrior.GetLogProb(qSample) - q.GetLogProb(qSample);
double weight = Math.Exp(logf - shift);
mvaQ.Add(1 / qSample, weight);
double invProduct = (r + A.Rate / qSample) / (shape2 - 1);
mvaInvProduct.Add(invProduct, weight);
}
Trace.WriteLine($"q = {mvaQ}, {iqMean}, {iqVariance}");
Trace.WriteLine($"invA = {mvaInvProduct} {mvaInvProduct.Variance * (shape2 - 1) / (shape2 - 2) + mvaInvProduct.Mean * mvaInvProduct.Mean / (shape2 - 2)}, {ipMean}, {ipVariance}");
Trace.WriteLine($"productMarginal = {productMarginal}");
}
}
else
{
// Compute the moments of y = product^(1/product.Power)
// yMean = E[shape2*b/(b y_r + a_r)]
// yVariance = E[shape2*(shape2+1)*b^2/(b y_r + a_r)^2] - yMean^2
// = var(shape2*b/(b y_r + a_r)) + E[shape2*b^2/(b y_r + a_r)^2]
// = shape2^2*var(b/(b y_r + a_r)) + shape2*(var(b/(b y_r + a_r)) + (yMean/shape2)^2)
// Let g = b/(b y_r + a_r)
double denom = qPoint * r + A.Rate;
double denom2 = denom * denom;
double rOverDenom = r / denom;
double[] gDerivatives = (denom == 0)
? new double[] { 0, 0, 0, 0 }
: new double[] { qPoint / denom, A.Rate / denom2, -2 * A.Rate / denom2 * rOverDenom, 6 * A.Rate / denom2 * rOverDenom * rOverDenom };
double gMean, gVariance;
GaussianOp_Laplace.LaplaceMoments(q, gDerivatives, dlogfs(qPoint, product, A), out gMean, out gVariance);
double yMean = shape2 * gMean;
double yVariance = shape2 * shape2 * gVariance + shape2 * (gVariance + gMean * gMean);
productMarginal = GammaPower.FromGamma(Gamma.FromMeanAndVariance(yMean, yVariance), result.Power);
}
result.SetToRatio(productMarginal, product, GammaProductOp_Laplace.ForceProper);
if (double.IsNaN(result.Shape) || double.IsNaN(result.Rate))
throw new InferRuntimeException("result is nan");
return result;
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaPowerProductOp_Laplace"]/message_doc[@name="AAverageConditional(GammaPower, GammaPower, GammaPower, Gamma, GammaPower)"]/*'/>
public static GammaPower AAverageConditional([SkipIfUniform] GammaPower product, GammaPower A, [SkipIfUniform] GammaPower B, Gamma q, GammaPower result)
{
if (B.Shape < A.Shape) return BAverageConditional(product, B, A, q, result);
if (B.IsPointMass)
return GammaProductOp.AAverageConditional(product, B.Point, result);
if (A.IsPointMass)
return GammaPower.Uniform(A.Power); // TODO
if (product.IsUniform()) return product;
double qPoint = q.GetMean();
GammaPower aMarginal;
if (product.IsPointMass)
{
// Z = int Ga(y/q; s, r)/q Ga(q; q_s, q_r) dq
// E[a] = E[product/q]
// E[a^2] = E[product^2/q^2]
// aVariance = E[a^2] - aMean^2
double productPoint = product.Point;
if (productPoint == 0) aMarginal = GammaPower.PointMass(0, result.Power);
else
{
double iq = 1 / qPoint;
double iq2 = iq * iq;
double[] iqDerivatives = new double[] { iq, -iq2, 2 * iq2 * iq, -6 * iq2 * iq2 };
double iqMean, iqVariance;
GaussianOp_Laplace.LaplaceMoments(q, iqDerivatives, dlogfs(qPoint, product, A), out iqMean, out iqVariance);
double aMean = productPoint * iqMean;
double aVariance = productPoint * productPoint * iqVariance;
aMarginal = GammaPower.FromGamma(Gamma.FromMeanAndVariance(aMean, aVariance), result.Power);
}
}
else
{
if (double.IsPositiveInfinity(product.Rate))
{
return GammaPower.PointMass(0, result.Power);
}
if (A.Power != product.Power) throw new NotSupportedException($"A.Power ({A.Power}) != product.Power ({product.Power})");
if (B.Power != product.Power) throw new NotSupportedException($"B.Power ({B.Power}) != product.Power ({product.Power})");
double r = product.Rate;
double r2 = r * r;
double g = 1 / (qPoint * r + A.Rate);
double g2 = g * g;
double shape2 = GammaFromShapeAndRateOp_Slow.AddShapesMinus1(product.Shape, A.Shape) + (1 - A.Power);
// From above:
// a^(y_s-pa + a_s-1) exp(-(y_r b + a_r)*a)
if (shape2 > 2)
{
// Compute the moments of a^(-1/a.Power)
// Here q = b^(1/b.Power)
// E[a^(-1/a.Power)] = E[(q r + a_r)/(shape2-1)]
// var(a^(-1/a.Power)) = E[(q r + a_r)^2/(shape2-1)/(shape2-2)] - E[a^(-1/a.Power)]^2
// = (var(q r + a_r) + E[(q r + a_r)]^2)/(shape2-1)/(shape2-2) - E[(q r + a_r)]^2/(shape2-1)^2
// = var(q r + a_r)/(shape2-1)/(shape2-2) + E[(q r + a_r)/(shape2-1)]^2/(shape2-2)
double[] qDerivatives = new double[] { qPoint, 1, 0, 0 };
double qMean, qVariance;
GaussianOp_Laplace.LaplaceMoments(q, qDerivatives, dlogfs(qPoint, product, A), out qMean, out qVariance);
double iaMean = (qMean * r + A.Rate) / (shape2 - 1);
double iaVariance = (qVariance * r2 / (shape2 - 1) + iaMean * iaMean) / (shape2 - 2);
GammaPower iaMarginal = GammaPower.FromMeanAndVariance(iaMean, iaVariance, -1);
if (iaMarginal.IsUniform())
{
if (result.Power > 0)
return GammaPower.PointMass(0, result.Power);
else
return GammaPower.Uniform(result.Power);
}
else
aMarginal = GammaPower.FromShapeAndRate(iaMarginal.Shape, iaMarginal.Rate, result.Power);
bool check = false;
if (check)
{
// Importance sampling
MeanVarianceAccumulator mvaB = new MeanVarianceAccumulator();
MeanVarianceAccumulator mvaInvA = new MeanVarianceAccumulator();
Gamma bPrior = Gamma.FromShapeAndRate(B.Shape, B.Rate);
q = bPrior;
double shift = (product.Shape - product.Power) * Math.Log(qPoint) - shape2 * Math.Log(A.Rate + qPoint * r) + bPrior.GetLogProb(qPoint) - q.GetLogProb(qPoint);
for (int i = 0; i < 1000000; i++)
{
double bSample = q.Sample();
// logf = (y_s-y_p)*log(b) - (s+y_s-pa)*log(r + b*y_r)
double logf = (product.Shape - product.Power) * Math.Log(bSample) - shape2 * Math.Log(A.Rate + bSample * r) + bPrior.GetLogProb(bSample) - q.GetLogProb(bSample);
double weight = Math.Exp(logf - shift);
mvaB.Add(bSample, weight);
double invA = (bSample * r + A.Rate) / (shape2 - 1);
mvaInvA.Add(invA, weight);
}
Trace.WriteLine($"b = {mvaB}, {qMean}, {qVariance}");
Trace.WriteLine($"invA = {mvaInvA} {mvaInvA.Variance * (shape2 - 1) / (shape2 - 2) + mvaInvA.Mean * mvaInvA.Mean / (shape2 - 2)}, {iaMean}, {iaVariance}");
Trace.WriteLine($"aMarginal = {aMarginal}");
}
}
else
{
// Compute the moments of a^(1/a.Power)
// aMean = shape2/(b y_r + a_r)
// aVariance = E[shape2*(shape2+1)/(b y_r + a_r)^2] - aMean^2 = var(shape2/(b y_r + a_r)) + E[shape2/(b y_r + a_r)^2]
// = shape2^2*var(1/(b y_r + a_r)) + shape2*(var(1/(b y_r + a_r)) + (aMean/shape2)^2)
double[] gDerivatives = new double[] { g, -r * g2, 2 * g2 * g * r2, -6 * g2 * g2 * r2 * r };
double gMean, gVariance;
GaussianOp_Laplace.LaplaceMoments(q, gDerivatives, dlogfs(qPoint, product, A), out gMean, out gVariance);
double aMean = shape2 * gMean;
double aVariance = shape2 * shape2 * gVariance + shape2 * (gVariance + gMean * gMean);
aMarginal = GammaPower.FromGamma(Gamma.FromMeanAndVariance(aMean, aVariance), result.Power);
}
}
result.SetToRatio(aMarginal, A, GammaProductOp_Laplace.ForceProper);
if (double.IsNaN(result.Shape) || double.IsNaN(result.Rate))
throw new InferRuntimeException("result is nan");
return result;
}
}
}

Просмотреть файл

@ -0,0 +1,89 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
namespace Microsoft.ML.Probabilistic.Factors
{
using System;
using System.Diagnostics;
using Microsoft.ML.Probabilistic.Distributions;
using Microsoft.ML.Probabilistic.Math;
using Microsoft.ML.Probabilistic.Factors.Attributes;
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp_Laplace"]/doc/*'/>
[FactorMethod(typeof(Factor), "Product", typeof(double), typeof(double))]
[Buffers("Q")]
[Quality(QualityBand.Experimental)]
public static class GammaProductOp_Laplace
{
public static bool ForceProper = true;
// derivatives of the factor marginalized over Product and A
private static double[] dlogfs(double b, Gamma product, Gamma A)
{
return GammaPowerProductOp_Laplace.dlogfs(b, GammaPower.FromGamma(product, 1), GammaPower.FromGamma(A, 1));
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp_Laplace"]/message_doc[@name="QInit()"]/*'/>
[Skip]
public static Gamma QInit()
{
return Gamma.Uniform();
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp_Laplace"]/message_doc[@name="Q(Gamma, Gamma, Gamma)"]/*'/>
[Fresh]
public static Gamma Q(Gamma product, [Proper] Gamma A, [Proper] Gamma B)
{
return GammaPowerProductOp_Laplace.Q(GammaPower.FromGamma(product, 1), GammaPower.FromGamma(A, 1), GammaPower.FromGamma(B, 1));
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp_Laplace"]/message_doc[@name="LogAverageFactor(Gamma, Gamma, Gamma, Gamma)"]/*'/>
public static double LogAverageFactor(Gamma product, Gamma A, Gamma B, Gamma q)
{
return GammaPowerProductOp_Laplace.LogAverageFactor(GammaPower.FromGamma(product, 1), GammaPower.FromGamma(A, 1), GammaPower.FromGamma(B, 1), q);
}
private static double LogAverageFactor_slow(Gamma product, Gamma A, [Proper] Gamma B)
{
return LogAverageFactor(product, A, B, Q(product, A, B));
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp_Laplace"]/message_doc[@name="LogEvidenceRatio(Gamma, Gamma, Gamma, Gamma, Gamma)"]/*'/>
public static double LogEvidenceRatio([SkipIfUniform] Gamma product, Gamma A, Gamma B, Gamma to_product, Gamma q)
{
return LogAverageFactor(product, A, B, q) - to_product.GetLogAverageOf(product);
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp_Laplace"]/message_doc[@name="LogEvidenceRatio(double, Gamma, Gamma, Gamma)"]/*'/>
public static double LogEvidenceRatio(double product, Gamma A, Gamma B, Gamma q)
{
return LogAverageFactor(Gamma.PointMass(product), A, B, q);
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp_Laplace"]/message_doc[@name="BAverageConditional(Gamma, Gamma, Gamma, Gamma)"]/*'/>
public static Gamma BAverageConditional([SkipIfUniform] Gamma product, [Proper] Gamma A, [Proper] Gamma B, Gamma q)
{
return ToGamma(GammaPowerProductOp_Laplace.BAverageConditional(GammaPower.FromGamma(product, 1), GammaPower.FromGamma(A, 1), GammaPower.FromGamma(B, 1), q, GammaPower.Uniform(1)));
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp_Laplace"]/message_doc[@name="ProductAverageConditional(Gamma, Gamma, Gamma, Gamma)"]/*'/>
public static Gamma ProductAverageConditional(Gamma product, [Proper] Gamma A, [SkipIfUniform] Gamma B, Gamma q)
{
return ToGamma(GammaPowerProductOp_Laplace.ProductAverageConditional(GammaPower.FromGamma(product, 1), GammaPower.FromGamma(A, 1), GammaPower.FromGamma(B, 1), q, GammaPower.Uniform(1)));
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp_Laplace"]/message_doc[@name="AAverageConditional(Gamma, Gamma, Gamma, Gamma)"]/*'/>
public static Gamma AAverageConditional([SkipIfUniform] Gamma product, Gamma A, [SkipIfUniform] Gamma B, Gamma q)
{
return ToGamma(GammaPowerProductOp_Laplace.AAverageConditional(GammaPower.FromGamma(product, 1), GammaPower.FromGamma(A, 1), GammaPower.FromGamma(B, 1), q, GammaPower.Uniform(1)));
}
public static Gamma ToGamma(GammaPower gammaPower)
{
if (gammaPower.Power != 1) throw new Exception("gammaPower.Power != 1");
return Gamma.FromShapeAndRate(gammaPower.Shape, gammaPower.Rate);
}
}
}

Просмотреть файл

@ -1810,6 +1810,7 @@ namespace Microsoft.ML.Probabilistic.Factors
double a1 = -2 * x * ddlogf - x * x * dddlogf;
double da = -x * x * ddg + dx * a1;
m = g[0] + (MMath.Digamma(a) - Math.Log(a)) * da;
if (double.IsNaN(m)) throw new Exception("m is nan");
if (g.Length > 3)
{
double dddg = g[3];
@ -1821,6 +1822,7 @@ namespace Microsoft.ML.Probabilistic.Factors
v = dg * dx + (MMath.Trigamma(a) - 1 / a) * da * da + (MMath.Digamma(a) - Math.Log(a)) * dda;
//if (v < 0)
// throw new Exception("v < 0");
if (double.IsNaN(v)) throw new Exception("v is nan");
}
else
{

Просмотреть файл

@ -7,6 +7,92 @@ namespace Microsoft.ML.Probabilistic.Factors
using System;
using Microsoft.ML.Probabilistic.Distributions;
using Microsoft.ML.Probabilistic.Factors.Attributes;
using Microsoft.ML.Probabilistic.Math;
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="PlusGammaVmpOp"]/doc/*'/>
[FactorMethod(typeof(Factor), "Plus", typeof(double), typeof(double), Default = true)]
[Quality(QualityBand.Experimental)]
public static class PlusGammaOp
{
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="PlusGammaOp"]/message_doc[@name="SumAverageConditional(GammaPower, double)"]/*'/>
public static GammaPower SumAverageConditional([SkipIfUniform] GammaPower a, double b)
{
if (double.IsInfinity(b) || double.IsNaN(b)) throw new ArgumentOutOfRangeException(nameof(b), b, $"Argument is outside the range of supported values.");
if (a.IsUniform() || b == 0) return a;
else if (a.Power == 0) throw new ArgumentException($"Cannot add {b} to {a}");
else if (a.IsPointMass) return GammaPower.PointMass(a.Point + b, a.Power);
else if (a.Power < 0)
{
if (a.Shape <= a.Power) return a; // mode is at zero
// The mode is ((Shape - Power)/Rate)^Power
// We want to shift the mode by b, preserving the Shape and Power.
// This implies ((Shape - Power)/newRate)^Power = newMode
// newRate = (Shape - Power)/newMode^(1/Power)
// = (a.Shape - a.Power) * Math.Pow(a.GetMode() + b, -1 / a.Power);
//double logMode = a.Power * (Math.Log(Math.Max(0, a.Shape - a.Power)) - Math.Log(a.Rate));
//if (logMode > double.MaxValue) return a; // mode is at infinity
double logShapeMinusPower = Math.Log(a.Shape - a.Power);
double mode = a.GetMode();
if (mode > double.MaxValue) return a; // mode is at infinity
double newMode = Math.Max(0, mode + b);
double newLogMode = Math.Log(newMode);
// Find newLogRate to satisfy a.Power*(logShapeMinusPower - newLogRate) <= newLogMode
// logShapeMinusPower - newLogRate >= newLogMode/a.Power
// newLogRate - logShapeMinusPower <= -newLogMode/a.Power
double newLogModeOverPower = MMath.LargestDoubleRatio(newLogMode, -a.Power);
double newLogRate = MMath.LargestDoubleSum(logShapeMinusPower, newLogModeOverPower);
if ((double)((logShapeMinusPower - newLogRate) * a.Power) > newLogMode) throw new Exception();
// Ideally this would find largest newRate such that log(newRate) <= newLogRate
double newRate = Math.Exp(newLogRate);
if (logShapeMinusPower == newLogRate) newRate = a.Shape - a.Power;
if (a.Rate > 0) newRate = Math.Max(double.Epsilon, newRate);
if (!double.IsPositiveInfinity(a.Rate)) newRate = Math.Min(double.MaxValue, newRate);
return GammaPower.FromShapeAndRate(a.Shape, newRate, a.Power);
}
else if (!a.IsProper()) return a;
else
{
// The mean is Math.Exp(Power * (MMath.RisingFactorialLnOverN(Shape, Power) - Math.Log(Rate)))
// We want to shift the mean by b, preserving the Shape and Power.
// This implies log(newRate) = MMath.RisingFactorialLnOverN(Shape, Power) - log(newMean)/Power
double logShape = MMath.RisingFactorialLnOverN(a.Shape, a.Power);
//double logMean = a.GetLogMeanPower(1);
//double newLogMean = (b > 0) ?
// MMath.LogSumExp(logMean, Math.Log(b)) :
// MMath.LogDifferenceOfExp(logMean, Math.Log(-b));
double newMean = Math.Max(0, a.GetMean() + b);
double newLogMean = Math.Log(newMean);
// If logShape is big, this difference can lose accuracy
// Find newLogRate to satisfy logShape - newLogRate <= newLogMean/a.Power
double newLogMeanOverPower = MMath.LargestDoubleRatio(newLogMean, a.Power);
double newLogRate = -MMath.LargestDoubleSum(-logShape, newLogMeanOverPower);
// check: (logShape - newLogRate)*a.Power <= newLogMean
if ((double)((logShape - newLogRate) * a.Power) > newLogMean) throw new Exception();
double newRate = Math.Exp(newLogRate);
newRate = Math.Max(double.Epsilon, newRate);
if (!double.IsPositiveInfinity(a.Rate)) newRate = Math.Min(double.MaxValue, newRate);
return GammaPower.FromShapeAndRate(a.Shape, newRate, a.Power);
}
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="PlusGammaOp"]/message_doc[@name="SumAverageConditional(GammaPower, double)"]/*'/>
public static GammaPower SumAverageConditional(double a, [SkipIfUniform] GammaPower b)
{
return SumAverageConditional(b, a);
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="PlusGammaOp"]/message_doc[@name="AAverageConditional(GammaPower, double)"]/*'/>
public static GammaPower AAverageConditional([SkipIfUniform] GammaPower sum, double b)
{
return SumAverageConditional(sum, -b);
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="PlusGammaOp"]/message_doc[@name="BAverageConditional(GammaPower, double)"]/*'/>
public static GammaPower BAverageConditional([SkipIfUniform] GammaPower sum, double a)
{
return AAverageConditional(sum, a);
}
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="PlusGammaVmpOp"]/doc/*'/>
[FactorMethod(typeof(Factor), "Plus", typeof(double), typeof(double), Default = true)]

Просмотреть файл

@ -6,6 +6,7 @@ using System;
using Microsoft.ML.Probabilistic.Math;
using Microsoft.ML.Probabilistic.Distributions;
using Microsoft.ML.Probabilistic.Factors.Attributes;
using System.Diagnostics;
namespace Microsoft.ML.Probabilistic.Factors
{
@ -14,29 +15,183 @@ namespace Microsoft.ML.Probabilistic.Factors
[Quality(QualityBand.Experimental)]
public static class PowerOp
{
// Gamma = TruncatedGamma ^ y /////////////////////////////////////////////////////////
public static Gamma PowAverageConditional([SkipIfUniform] TruncatedGamma x, double y)
{
double mean = x.GetMeanPower(y);
if (x.LowerBound > 0)
{
double meanInverse = x.GetMeanPower(-y);
return GammaFromMeanAndMeanInverse(mean, meanInverse);
}
else
{
double variance = x.GetMeanPower(2 * y) - mean * mean;
return Gamma.FromMeanAndVariance(mean, variance);
}
}
public static Gamma GammaFromMeanAndMeanInverse(double mean, double meanInverse)
{
// mean = a/b
// meanInverse = b/(a-1)
// a = mean*meanInverse / (mean*meanInverse - 1)
// b = a/mean
double rate = meanInverse / (mean * meanInverse - 1);
double shape = mean * rate;
return Gamma.FromShapeAndRate(shape, rate);
}
public static TruncatedGamma XAverageConditional([SkipIfUniform] Gamma pow, TruncatedGamma x, double y)
{
// message computed below should be uniform when pow is uniform, but may not due to roundoff error.
if (pow.IsUniform()) return TruncatedGamma.Uniform();
// Factor is (x^y)^(pow.Shape/pow.Power - 1) * exp(-pow.Rate*(x^y)^1/pow.Power)
// =propto x^(pow.Shape/(pow.Power/y) - y) * exp(-pow.Rate*x^y/pow.Power)
// newShape/(pow.Power/y) - 1 = pow.Shape/(pow.Power/y) - y
// newShape = pow.Shape + (1-y)*(pow.Power/y)
double power = 1 / y;
var toPow = PowAverageConditional(x, y);
var powMarginal = pow * toPow;
// xMarginal2 is the exact distribution of pow^(1/y) where pow has distribution powMarginal
GammaPower xMarginal2 = GammaPower.FromShapeAndRate(powMarginal.Shape, powMarginal.Rate, power);
var xMarginal = new TruncatedGamma(GammaFromGammaPower(xMarginal2));
var result = xMarginal;
result.SetToRatio(xMarginal, x, GammaProductOp_Laplace.ForceProper);
return result;
}
// Gamma = Gamma ^ y /////////////////////////////////////////////////////////
public static Gamma PowAverageConditional([SkipIfUniform] Gamma x, double y, Gamma result)
{
GammaPower message = GammaPower.FromShapeAndRate(x.Shape, x.Rate, y);
return GammaFromGammaPower(message);
}
public static Gamma XAverageConditional([SkipIfUniform] Gamma pow, Gamma x, double y, Gamma result)
{
// message computed below should be uniform when pow is uniform, but may not due to roundoff error.
if (pow.IsUniform()) return Gamma.Uniform();
// Factor is (x^y)^(pow.Shape/pow.Power - 1) * exp(-pow.Rate*(x^y)^1/pow.Power)
// =propto x^(pow.Shape/(pow.Power/y) - y) * exp(-pow.Rate*x^y/pow.Power)
// newShape/(pow.Power/y) - 1 = pow.Shape/(pow.Power/y) - y
// newShape = pow.Shape + (1-y)*(pow.Power/y)
double power = 1 / y;
var toPow = PowAverageConditional(x, y, pow);
var powMarginal = pow * toPow;
// xMarginal2 is the exact distribution of pow^(1/y) where pow has distribution powMarginal
GammaPower xMarginal2 = GammaPower.FromShapeAndRate(powMarginal.Shape, powMarginal.Rate, power);
Gamma xMarginal = GammaFromGammaPower(xMarginal2);
result.SetToRatio(xMarginal, x, GammaProductOp_Laplace.ForceProper);
return result;
}
public static Gamma GammaFromGammaPower(GammaPower message)
{
if (message.Power == 1) return Gamma.FromShapeAndRate(message.Shape, message.Rate); // same as below, but faster
if (message.IsUniform()) return Gamma.Uniform();
message.GetMeanAndVariance(out double mean, out double variance);
return Gamma.FromMeanAndVariance(mean, variance);
}
// GammaPower = GammaPower ^ y /////////////////////////////////////////////////////////
public static GammaPower PowAverageConditional([SkipIfUniform] GammaPower x, double y)
public static double LogAverageFactor(GammaPower pow, GammaPower x, double y)
{
return GammaPower.FromShapeAndRate(x.Shape, x.Rate, y*x.Power);
// GetLogAverageOf =
// gammaln(shape1+shape2-power) - (shape1+shape2-power)*log(rate1+rate2) - log(|power|)
// -gammaln(shape1) + shape1 * log(rate1)
// -gammaln(shape2) + shape2 * log(rate2)
// d/dshape2 = digamma(shape1+shape2-power) - digamma(shape2) - log(rate1/rate2 + 1)
// d/drate2 = -(shape1+shape2-power)/(rate1+rate2) + shape2/rate2
GammaPower toPow = PowAverageConditional(x, y, pow);
return toPow.GetLogAverageOf(pow);
}
public static GammaPower XAverageConditional([SkipIfUniform] GammaPower pow, double y, GammaPower result)
public static GammaPower PowAverageConditional([SkipIfUniform] GammaPower x, double y, GammaPower result)
{
GammaPower message = GammaPower.FromShapeAndRate(x.Shape, x.Rate, y * x.Power);
return GammaPowerFromDifferentPower(message, result.Power);
}
public static GammaPower XAverageConditional([SkipIfUniform] GammaPower pow, GammaPower x, double y, GammaPower result)
{
// message computed below should be uniform when pow is uniform, but may not due to roundoff error.
if (pow.IsUniform()) return GammaPower.Uniform(result.Power);
// Factor is (x^y)^(pow.Shape/pow.Power - 1) * exp(-pow.Rate*(x^y)^1/pow.Power)
// =propto x^(pow.Shape/(pow.Power/y) - y) * exp(-pow.Rate*x^y/pow.Power)
// newShape/(pow.Power/y) - 1 = pow.Shape/(pow.Power/y) - y
// newShape = pow.Shape + (1-y)*(pow.Power/y)
double power = pow.Power / y;
if (power != result.Power)
throw new NotSupportedException("Outgoing message power (" + power + ") does not match the desired power (" + result.Power + ")");
return GammaPower.FromShapeAndRate(pow.Shape, pow.Rate, pow.Power / y);
var toPow = PowAverageConditional(x, y, pow);
GammaPower powMarginal = pow * toPow;
// xMarginal2 is the exact distribution of pow^(1/y) where pow has distribution powMarginal
GammaPower xMarginal2 = GammaPower.FromShapeAndRate(powMarginal.Shape, powMarginal.Rate, power);
GammaPower xMarginal = GammaPowerFromDifferentPower(xMarginal2, result.Power);
result.SetToRatio(xMarginal, x, GammaProductOp_Laplace.ForceProper);
return result;
}
public static GammaPower PowAverageLogarithm([SkipIfUniform] GammaPower x, double y)
public static Gamma FromMeanPowerAndMeanLog(double meanPower, double meanLog, double power)
{
return PowAverageConditional(x, y);
// We want E[log(x)] = meanLog but this sets E[log(x^power)] = meanLog, so we scale meanLog
var gammaPower = GammaPower.FromMeanAndMeanLog(meanPower, meanLog * power, power);
return Gamma.FromShapeAndRate(gammaPower.Shape, gammaPower.Rate);
}
public static GammaPower XAverageLogarithm([SkipIfUniform] GammaPower pow, double y, GammaPower result)
public static GammaPower GammaPowerFromDifferentPower(GammaPower message, double newPower)
{
return XAverageConditional(pow, y, result);
if (message.Power == newPower) return message; // same as below, but faster
if (message.IsUniform()) return GammaPower.Uniform(newPower);
// Making two hops ensures that the desired mean powers are finite.
if (message.Power > 0 && newPower < 0 && newPower != -1) return GammaPowerFromDifferentPower(GammaPowerFromDifferentPower(message, -1), newPower);
if (message.Power < 0 && newPower > 0 && newPower != 1) return GammaPowerFromDifferentPower(GammaPowerFromDifferentPower(message, 1), newPower);
// Project the message onto the desired power
if (newPower == 1 || newPower == -1 || newPower == 2)
{
message.GetMeanAndVariance(out double mean, out double variance);
if (!double.IsPositiveInfinity(mean))
return GammaPower.FromMeanAndVariance(mean, variance, newPower);
// Fall through
}
bool useMean = false;
if(useMean)
{
// Constraints:
// mean = Gamma(Shape + newPower)/Gamma(Shape)/Rate^newPower =approx (Shape/Rate)^newPower
// mean2 = Gamma(Shape + 2*newPower)/Gamma(Shape)/Rate^(2*newPower) =approx ((Shape + newPower)/Rate)^newPower * (Shape/Rate)^newPower
// mean2/mean^2 = Gamma(Shape + 2*newPower)*Gamma(Shape)/Gamma(Shape + newPower)^2 =approx ((Shape + newPower)/Shape)^newPower
// Shape =approx newPower/((mean2/mean^2)^(1/newPower) - 1)
// Rate = Shape/mean^(1/newPower)
message.GetMeanAndVariance(out double mean, out double variance);
double meanp = System.Math.Pow(mean, 1 / newPower);
double mean2p = System.Math.Pow(variance + mean * mean, 1 / newPower);
double shape = newPower / (mean2p / meanp / meanp - 1);
if (double.IsInfinity(shape)) return GammaPower.PointMass(mean, newPower);
double rate = shape / meanp;
return GammaPower.FromShapeAndRate(shape, rate, newPower);
}
else
{
// Compute the mean and variance of x^1/newPower
double mean = message.GetMeanPower(1 / newPower);
double mean2 = message.GetMeanPower(2 / newPower);
double variance = System.Math.Max(0, mean2 - mean * mean);
if (double.IsPositiveInfinity(mean*mean)) variance = mean;
return GammaPower.FromGamma(Gamma.FromMeanAndVariance(mean, variance), newPower);
}
}
public static GammaPower PowAverageLogarithm([SkipIfUniform] GammaPower x, double y, GammaPower result)
{
return PowAverageConditional(x, y, result);
}
public static GammaPower XAverageLogarithm([SkipIfUniform] GammaPower pow, GammaPower x, double y, GammaPower result)
{
return XAverageConditional(pow, x, y, result);
}
// GammaPower = Gamma ^ y //////////////////////////////////////////////////////////////
@ -50,7 +205,7 @@ namespace Microsoft.ML.Probabilistic.Factors
{
if (y != pow.Power)
throw new NotSupportedException("Incoming message " + pow + " does not match the exponent (" + y + ")");
return Gamma.FromShapeAndRate(pow.Shape, pow.Rate);
return Gamma.FromShapeAndRate(pow.Shape + (1 - y), pow.Rate);
}
public static GammaPower PowAverageLogarithm([SkipIfUniform] Gamma x, double y)

Просмотреть файл

@ -5,6 +5,7 @@
namespace Microsoft.ML.Probabilistic.Factors
{
using System;
using System.Diagnostics;
using Microsoft.ML.Probabilistic.Distributions;
using Microsoft.ML.Probabilistic.Math;
using Microsoft.ML.Probabilistic.Factors.Attributes;
@ -14,6 +15,12 @@ namespace Microsoft.ML.Probabilistic.Factors
[Quality(QualityBand.Preview)]
public static class GammaProductOp
{
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp"]/message_doc[@name="ProductAverageConditional(double, double)"]/*'/>
public static Gamma ProductAverageConditional(double a, double b)
{
return Gamma.PointMass(a * b);
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp"]/message_doc[@name="ProductAverageConditional(double, Gamma)"]/*'/>
public static Gamma ProductAverageConditional(double A, [SkipIfUniform] Gamma B)
{
@ -26,10 +33,16 @@ namespace Microsoft.ML.Probabilistic.Factors
return ProductAverageConditional(B, A);
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp"]/message_doc[@name="ProductAverageConditional(double, double)"]/*'/>
public static Gamma ProductAverageConditional(double a, double b)
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp"]/message_doc[@name="ProductAverageConditional(double, GammaPower)"]/*'/>
public static GammaPower ProductAverageConditional(double A, [SkipIfUniform] GammaPower B)
{
return Gamma.PointMass(a * b);
return GammaProductVmpOp.ProductAverageLogarithm(A, B);
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp"]/message_doc[@name="ProductAverageConditional(GammaPower, double)"]/*'/>
public static GammaPower ProductAverageConditional([SkipIfUniform] GammaPower A, double B)
{
return ProductAverageConditional(B, A);
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp"]/message_doc[@name="AAverageConditional(Gamma, double)"]/*'/>
@ -43,6 +56,36 @@ namespace Microsoft.ML.Probabilistic.Factors
return Gamma.FromShapeAndRate(Product.Shape, Product.Rate * B);
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp"]/message_doc[@name="AAverageConditional(Gamma, double, GammaPower)"]/*'/>
public static GammaPower AAverageConditional([SkipIfUniform] GammaPower Product, double B, GammaPower result)
{
if (Product.IsPointMass)
return AAverageConditional(Product.Point, B, result);
if (B == 0)
{
result.SetToUniform();
return result;
}
// (ab)^(shape/power-1) exp(-rate*(ab)^(1/power))
return GammaPower.FromShapeAndRate(Product.Shape, Product.Rate * Math.Pow(B, 1/result.Power), result.Power);
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp"]/message_doc[@name="AAverageConditional(double, double, GammaPower)"]/*'/>
public static GammaPower AAverageConditional(double Product, double B, GammaPower result)
{
if (B == 0)
{
if (Product != 0)
throw new AllZeroException();
result.SetToUniform();
}
else if ((Product != 0) && (Product > 0) != (B > 0))
throw new AllZeroException("Product and argument do not have the same sign");
else
result.Point = Product / B;
return result;
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp"]/message_doc[@name="AAverageConditional(double, double)"]/*'/>
public static Gamma AAverageConditional(double Product, double B)
{
@ -101,12 +144,24 @@ namespace Microsoft.ML.Probabilistic.Factors
return AAverageConditional(Product, A);
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp"]/message_doc[@name="BAverageConditional(Gamma, double, GammaPower)"]/*'/>
public static GammaPower BAverageConditional([SkipIfUniform] GammaPower Product, double A, GammaPower result)
{
return AAverageConditional(Product, A, result);
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp"]/message_doc[@name="BAverageConditional(double, double)"]/*'/>
public static Gamma BAverageConditional(double Product, double A)
{
return AAverageConditional(Product, A);
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp"]/message_doc[@name="BAverageConditional(double, double, GammaPower)"]/*'/>
public static GammaPower BAverageConditional(double Product, double A, GammaPower result)
{
return AAverageConditional(Product, A, result);
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp"]/message_doc[@name="LogAverageFactor(Gamma, Gamma, double)"]/*'/>
public static double LogAverageFactor(Gamma product, Gamma a, double b)
{
@ -120,6 +175,19 @@ namespace Microsoft.ML.Probabilistic.Factors
return LogAverageFactor(product, b, a);
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp"]/message_doc[@name="LogAverageFactor(GammaPower, GammaPower, double)"]/*'/>
public static double LogAverageFactor(GammaPower product, GammaPower a, double b)
{
GammaPower to_product = GammaProductOp.ProductAverageConditional(a, b);
return to_product.GetLogAverageOf(product);
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp"]/message_doc[@name="LogAverageFactor(GammaPower, double, GammaPower)"]/*'/>
public static double LogAverageFactor(GammaPower product, double a, GammaPower b)
{
return LogAverageFactor(product, b, a);
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp"]/message_doc[@name="LogAverageFactor(double, Gamma, double)"]/*'/>
public static double LogAverageFactor(double product, Gamma a, double b)
{
@ -134,6 +202,20 @@ namespace Microsoft.ML.Probabilistic.Factors
return LogAverageFactor(product, b, a);
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp"]/message_doc[@name="LogAverageFactor(double, double, GammaPower)"]/*'/>
public static double LogAverageFactor(double product, double a, GammaPower b)
{
return LogAverageFactor(product, b, a);
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp"]/message_doc[@name="LogAverageFactor(double, GammaPower, double)"]/*'/>
public static double LogAverageFactor(double product, GammaPower a, double b)
{
if (b == 0) return (product == 0) ? 0.0 : double.NegativeInfinity;
GammaPower to_product = GammaProductOp.ProductAverageConditional(a, b);
return to_product.GetLogProb(product);
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp"]/message_doc[@name="LogEvidenceRatio(Gamma, Gamma, double)"]/*'/>
[Skip]
public static double LogEvidenceRatio(Gamma product, Gamma a, double b)
@ -159,6 +241,32 @@ namespace Microsoft.ML.Probabilistic.Factors
{
return LogEvidenceRatio(product, b, a);
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp"]/message_doc[@name="LogEvidenceRatio(GammaPower, GammaPower, double)"]/*'/>
[Skip]
public static double LogEvidenceRatio(GammaPower product, GammaPower a, double b)
{
return 0.0;
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp"]/message_doc[@name="LogEvidenceRatio(GammaPower, double, GammaPower)"]/*'/>
[Skip]
public static double LogEvidenceRatio(GammaPower product, double a, GammaPower b)
{
return LogEvidenceRatio(product, b, a);
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp"]/message_doc[@name="LogEvidenceRatio(double, GammaPower, double)"]/*'/>
public static double LogEvidenceRatio(double product, GammaPower a, double b)
{
return LogAverageFactor(product, a, b);
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp"]/message_doc[@name="LogEvidenceRatio(double, double, GammaPower)"]/*'/>
public static double LogEvidenceRatio(double product, double a, GammaPower b)
{
return LogEvidenceRatio(product, b, a);
}
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaRatioOp"]/doc/*'/>
@ -466,7 +574,7 @@ namespace Microsoft.ML.Probabilistic.Factors
GaussianOp_Laplace.LaplaceMoments(q, g, dlogfs(x, ratio, A), out bMean, out bVariance);
Gamma bMarginal = Gamma.FromMeanAndVariance(bMean, bVariance);
Gamma result = new Gamma();
result.SetToRatio(bMarginal, B, true);
result.SetToRatio(bMarginal, B, GammaProductOp_Laplace.ForceProper);
return result;
}
@ -514,255 +622,7 @@ namespace Microsoft.ML.Probabilistic.Factors
Gamma aMarginal = Gamma.FromMeanAndVariance(aMean, aVariance);
Gamma result = new Gamma();
result.SetToRatio(aMarginal, A, true);
if (double.IsNaN(result.Shape) || double.IsNaN(result.Rate))
throw new InferRuntimeException("result is nan");
return result;
}
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp_Laplace"]/doc/*'/>
[FactorMethod(typeof(Factor), "Product", typeof(double), typeof(double))]
[Buffers("Q")]
[Quality(QualityBand.Experimental)]
public static class GammaProductOp_Laplace
{
// derivatives of the factor marginalized over Product and A
private static double[] dlogfs(double b, Gamma product, Gamma A)
{
if (product.IsPointMass)
{
// int delta(ab - y) Ga(a; s, r) da = int delta(a' - y) Ga(a'/b; s, r)/b da' = Ga(y/b; s, r)/b
// logf = -s*log(b) - y*r/b
double p = 1 / b;
double p2 = p * p;
double shape = A.Shape;
double c = product.Point * A.Rate;
double dlogf = -shape * p + c * p2;
double ddlogf = shape * p2 - 2 * c * p2 * p;
double dddlogf = -2 * shape * p * p2 + 6 * c * p2 * p2;
double d4logf = 6 * shape * p2 * p2 - 24 * c * p2 * p2 * p;
return new double[] { dlogf, ddlogf, dddlogf, d4logf };
}
else if (A.IsPointMass)
{
// Ga(ab; y_a, y_b)
// logf = (y_a-1)*log(b) - y_b*ab
double p = 1 / b;
double p2 = p * p;
double c = product.Rate * A.Point;
double shape = product.Shape - 1;
double dlogf = shape * p - c;
double ddlogf = -shape * p2;
double dddlogf = 2 * shape * p * p2;
double d4logf = -6 * shape * p2 * p2;
return new double[] { dlogf, ddlogf, dddlogf, d4logf };
}
else
{
// int Ga(ab; y_s, y_r) Ga(a; s, r) da = y_r^(y_s) r^s b^(y_s-1) / (r + b y_r)^(y_s + s-1)
// logf = (y_s-1)*log(b) - (s+y_s-1)*log(r + b*y_r)
double r = product.Rate;
double r2 = r * r;
double p = 1 / (A.Rate + b * r);
double p2 = p * p;
double b2 = b * b;
double shape = product.Shape - 1;
double shape2 = GammaFromShapeAndRateOp_Slow.AddShapesMinus1(product.Shape, A.Shape);
double dlogf = shape / b - shape2 * p * r;
double ddlogf = -shape / b2 + shape2 * p2 * r2;
double dddlogf = 2 * shape / (b * b2) - 2 * shape2 * p * p2 * r2 * r;
double d4logf = -6 * shape / (b2 * b2) + 6 * shape2 * p2 * p2 * r2 * r2;
return new double[] { dlogf, ddlogf, dddlogf, d4logf };
}
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp_Laplace"]/message_doc[@name="QInit()"]/*'/>
[Skip]
public static Gamma QInit()
{
return Gamma.Uniform();
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp_Laplace"]/message_doc[@name="Q(Gamma, Gamma, Gamma)"]/*'/>
[Fresh]
public static Gamma Q(Gamma product, [Proper] Gamma A, [Proper] Gamma B)
{
if (B.IsPointMass)
return B;
if (A.IsPointMass)
throw new NotImplementedException();
double x;
if (product.IsPointMass)
{
// logf = -a_s*log(b) - y*a_r/b
// logp = b_s*log(b) - b_r*b
// dlogfp = (b_s-a_s)/b - b_r + y*a_r/b^2 = 0
// -b_r b^2 + (b_s-a_s) b + y*a_r = 0
double shape = B.Shape - A.Shape;
double y = product.Point;
x = (Math.Sqrt(shape * shape + 4 * B.Rate * A.Rate * y) + shape) / 2 / B.Rate;
}
else
{
double shape1 = GammaFromShapeAndRateOp_Slow.AddShapesMinus1(B.Shape, product.Shape);
double shape2 = GammaFromShapeAndRateOp_Slow.AddShapesMinus1(A.Shape, product.Shape);
// find the maximum of the factor marginalized over Product and A, times B
// logf = (y_s-1)*log(b) - (y_s+a_s-1)*log(b*y_r + a_r)
// let b' = b*y_r and maximize over b'
x = GammaFromShapeAndRateOp_Slow.FindMaximum(shape1, shape2, A.Rate, B.Rate / product.Rate);
if (x == 0)
return B;
x /= product.Rate;
}
double[] dlogfss = dlogfs(x, product, A);
double dlogf = dlogfss[0];
double ddlogf = dlogfss[1];
return GammaFromShapeAndRateOp_Laplace.GammaFromDerivatives(B, x, dlogf, ddlogf);
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp_Laplace"]/message_doc[@name="LogAverageFactor(Gamma, Gamma, Gamma, Gamma)"]/*'/>
public static double LogAverageFactor(Gamma product, Gamma A, Gamma B, Gamma q)
{
if (B.IsPointMass)
return GammaProductOp.LogAverageFactor(product, A, B.Point);
if (A.IsPointMass)
return GammaProductOp.LogAverageFactor(product, A.Point, B);
double x = q.GetMean();
double logf;
if (product.IsPointMass)
{
// Ga(y/b; s, r)/b
double y = product.Point;
logf = (A.Shape - 1) * Math.Log(y) - A.Shape * Math.Log(x) - A.Rate * y / x - A.GetLogNormalizer();
}
else
{
// int Ga(ab; y_s, y_r) Ga(a; s, r) da = b^(y_s-1) / (r + b y_r)^(y_s + s-1) Gamma(y_s+s-1)
double shape = product.Shape - 1;
double shape2 = GammaFromShapeAndRateOp_Slow.AddShapesMinus1(product.Shape, A.Shape);
logf = shape * Math.Log(x) - shape2 * Math.Log(A.Rate + x * product.Rate) +
MMath.GammaLn(shape2) - A.GetLogNormalizer() - product.GetLogNormalizer();
}
double logz = logf + B.GetLogProb(x) - q.GetLogProb(x);
return logz;
}
private static double LogAverageFactor_slow(Gamma product, Gamma A, [Proper] Gamma B)
{
return LogAverageFactor(product, A, B, Q(product, A, B));
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp_Laplace"]/message_doc[@name="LogEvidenceRatio(Gamma, Gamma, Gamma, Gamma, Gamma)"]/*'/>
public static double LogEvidenceRatio(Gamma product, Gamma A, Gamma B, Gamma to_product, Gamma q)
{
return LogAverageFactor(product, A, B, q) - to_product.GetLogAverageOf(product);
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp_Laplace"]/message_doc[@name="LogEvidenceRatio(double, Gamma, Gamma, Gamma)"]/*'/>
public static double LogEvidenceRatio(double product, Gamma A, Gamma B, Gamma q)
{
return LogAverageFactor(Gamma.PointMass(product), A, B, q);
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp_Laplace"]/message_doc[@name="BAverageConditional(Gamma, Gamma, Gamma, Gamma)"]/*'/>
public static Gamma BAverageConditional([SkipIfUniform] Gamma product, [Proper] Gamma A, [Proper] Gamma B, Gamma q)
{
if (A.IsPointMass)
return GammaProductOp.BAverageConditional(product, A.Point);
if (B.IsPointMass)
throw new NotImplementedException();
if (q.IsUniform())
q = Q(product, A, B);
double x = q.GetMean();
double[] g = new double[] { x, 1, 0, 0 };
double bMean, bVariance;
GaussianOp_Laplace.LaplaceMoments(q, g, dlogfs(x, product, A), out bMean, out bVariance);
Gamma bMarginal = Gamma.FromMeanAndVariance(bMean, bVariance);
Gamma result = new Gamma();
result.SetToRatio(bMarginal, B, true);
return result;
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp_Laplace"]/message_doc[@name="ProductAverageConditional(Gamma, Gamma, Gamma, Gamma)"]/*'/>
public static Gamma ProductAverageConditional(Gamma product, [Proper] Gamma A, [SkipIfUniform] Gamma B, Gamma q)
{
if (B.IsPointMass)
return GammaProductOp.ProductAverageConditional(A, B.Point);
if (A.IsPointMass)
return GammaProductOp.ProductAverageConditional(A.Point, B);
if (product.IsPointMass)
throw new NotImplementedException();
double yMean, yVariance;
double x = q.GetMean();
double x2 = x * x;
double r = product.Rate;
double r2 = r * r;
double p = 1 / (x * r + A.Rate);
double p2 = p * p;
double shape2 = GammaFromShapeAndRateOp_Slow.AddShapesMinus1(product.Shape, A.Shape);
// yMean = shape2*b/(b y_r + a_r)
// yVariance = E[shape2*(shape2+1)*b^2/(b y_r + a_r)^2] - aMean^2 = var(shape2*b/(b y_r + a_r)) + E[shape2*b^2/(b y_r + a_r)^2]
// = shape2^2*var(b/(b y_r + a_r)) + shape2*(var(b/(b y_r + a_r)) + (aMean/shape2)^2)
double[] g = new double[] { x * p, A.Rate * p2, -2 * p2 * p * A.Rate * r, 6 * p2 * p2 * A.Rate * r2 };
double pMean, pVariance;
GaussianOp_Laplace.LaplaceMoments(q, g, dlogfs(x, product, A), out pMean, out pVariance);
yMean = shape2 * pMean;
yVariance = shape2 * shape2 * pVariance + shape2 * (pVariance + pMean * pMean);
Gamma yMarginal = Gamma.FromMeanAndVariance(yMean, yVariance);
Gamma result = new Gamma();
result.SetToRatio(yMarginal, product, true);
if (double.IsNaN(result.Shape) || double.IsNaN(result.Rate))
throw new InferRuntimeException("result is nan");
return result;
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductOp_Laplace"]/message_doc[@name="AAverageConditional(Gamma, Gamma, Gamma, Gamma)"]/*'/>
public static Gamma AAverageConditional(Gamma product, Gamma A, [SkipIfUniform] Gamma B, Gamma q)
{
if (B.IsPointMass)
return GammaProductOp.AAverageConditional(product, B.Point);
if (A.IsPointMass)
throw new NotImplementedException();
double aMean, aVariance;
double x = q.GetMean();
if (product.IsPointMass)
{
// Z = int Ga(y/b; s, r)/b Ga(b; b_s, b_r) db
// E[a] = E[y/b]
// E[a^2] = E[y^2/b^2]
// aVariance = E[a^2] - aMean^2
double y = product.Point;
double p = 1 / x;
double p2 = p * p;
double[] g = new double[] { p, -p2, 2 * p2 * p, -6 * p2 * p2 };
double pMean, pVariance;
GaussianOp_Laplace.LaplaceMoments(q, g, dlogfs(x, product, A), out pMean, out pVariance);
aMean = y * pMean;
aVariance = y * y * pVariance;
}
else
{
double x2 = x * x;
double r = product.Rate;
double r2 = r * r;
double p = 1 / (x * r + A.Rate);
double p2 = p * p;
double shape2 = GammaFromShapeAndRateOp_Slow.AddShapesMinus1(product.Shape, A.Shape);
// aMean = shape2/(b y_r + a_r)
// aVariance = E[shape2*(shape2+1)/(b y_r + a_r)^2] - aMean^2 = var(shape2/(b y_r + a_r)) + E[shape2/(b y_r + a_r)^2]
// = shape2^2*var(1/(b y_r + a_r)) + shape2*(var(1/(b y_r + a_r)) + (aMean/shape2)^2)
double[] g = new double[] { p, -r * p2, 2 * p2 * p * r2, -6 * p2 * p2 * r2 * r };
double pMean, pVariance;
GaussianOp_Laplace.LaplaceMoments(q, g, dlogfs(x, product, A), out pMean, out pVariance);
aMean = shape2 * pMean;
aVariance = shape2 * shape2 * pVariance + shape2 * (pVariance + pMean * pMean);
}
Gamma aMarginal = Gamma.FromMeanAndVariance(aMean, aVariance);
Gamma result = new Gamma();
result.SetToRatio(aMarginal, A, true);
result.SetToRatio(aMarginal, A, GammaProductOp_Laplace.ForceProper);
if (double.IsNaN(result.Shape) || double.IsNaN(result.Rate))
throw new InferRuntimeException("result is nan");
return result;
@ -784,12 +644,28 @@ namespace Microsoft.ML.Probabilistic.Factors
return (a == 0) ? 0.0 : -Math.Log(a);
}
public static double AverageLogFactor(double product, GammaPower a, double b)
{
return (b == 0) ? 0.0 : -Math.Log(b);
}
public static double AverageLogFactor(double product, double a, GammaPower b)
{
return (a == 0) ? 0.0 : -Math.Log(a);
}
[Skip]
public static double AverageLogFactor(Gamma product)
{
return 0.0;
}
[Skip]
public static double AverageLogFactor(GammaPower product)
{
return 0.0;
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductVmpOp"]/message_doc[@name="ProductAverageLogarithm(Gamma, Gamma)"]/*'/>
public static Gamma ProductAverageLogarithm([SkipIfUniform] Gamma A, [SkipIfUniform] Gamma B)
{
@ -803,6 +679,8 @@ namespace Microsoft.ML.Probabilistic.Factors
{
if (B.IsPointMass)
return Gamma.PointMass(A * B.Point);
if (A == 0)
return Gamma.PointMass(0);
return Gamma.FromShapeAndRate(B.Shape, B.Rate / A);
}
@ -812,6 +690,22 @@ namespace Microsoft.ML.Probabilistic.Factors
return ProductAverageLogarithm(B, A);
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductVmpOp"]/message_doc[@name="ProductAverageLogarithm(double, Gamma)"]/*'/>
public static GammaPower ProductAverageLogarithm(double A, [SkipIfUniform] GammaPower B)
{
if (B.IsPointMass)
return GammaPower.PointMass(A * B.Point, B.Power);
if (A == 0)
return GammaPower.PointMass(0, B.Power);
return GammaPower.FromShapeAndRate(B.Shape, B.Rate * Math.Pow(A, -1/B.Power), B.Power);
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductVmpOp"]/message_doc[@name="ProductAverageLogarithm(Gamma, double)"]/*'/>
public static GammaPower ProductAverageLogarithm([SkipIfUniform] GammaPower A, double B)
{
return ProductAverageLogarithm(B, A);
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductVmpOp"]/message_doc[@name="AAverageLogarithm(Gamma, Gamma)"]/*'/>
public static Gamma AAverageLogarithm([SkipIfUniform] Gamma Product, [Proper] Gamma B)
{
@ -836,12 +730,24 @@ namespace Microsoft.ML.Probabilistic.Factors
return GammaProductOp.AAverageConditional(Product, B);
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductVmpOp"]/message_doc[@name="AAverageLogarithm(GammaPower, double, GammaPower)"]/*'/>
public static GammaPower AAverageLogarithm([SkipIfUniform] GammaPower Product, double B, GammaPower result)
{
return GammaProductOp.AAverageConditional(Product, B, result);
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductVmpOp"]/message_doc[@name="AAverageLogarithm(double, double)"]/*'/>
public static Gamma AAverageLogarithm(double Product, double B)
{
return GammaProductOp.AAverageConditional(Product, B);
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductVmpOp"]/message_doc[@name="AAverageLogarithm(double, double, GammaPower)"]/*'/>
public static GammaPower AAverageLogarithm(double Product, double B, GammaPower result)
{
return GammaProductOp.AAverageConditional(Product, B, result);
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductVmpOp"]/message_doc[@name="BAverageLogarithm(Gamma, Gamma)"]/*'/>
public static Gamma BAverageLogarithm([SkipIfUniform] Gamma Product, [Proper] Gamma A)
{
@ -866,6 +772,18 @@ namespace Microsoft.ML.Probabilistic.Factors
{
return AAverageLogarithm(Product, A);
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductVmpOp"]/message_doc[@name="BAverageLogarithm(Gamma, double, GammaPower)"]/*'/>
public static GammaPower BAverageLogarithm([SkipIfUniform] GammaPower Product, double A, GammaPower result)
{
return AAverageLogarithm(Product, A, result);
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaProductVmpOp"]/message_doc[@name="BAverageLogarithm(double, double, GammaPower)"]/*'/>
public static GammaPower BAverageLogarithm(double Product, double A, GammaPower result)
{
return AAverageLogarithm(Product, A, result);
}
}
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GammaRatioVmpOp"]/doc/*'/>

Просмотреть файл

@ -993,7 +993,7 @@ mp.dps = 500
mp.pretty = True
gammainc(mpf('1'),mpf('1'),mpf('inf'),regularized=True)
*/
double[,] gammaUpper_pairs = new double[,] {
double[,] gammaUpperRegularized_pairs = new double[,] {
{1e-20,0.3,9.0567665167584664E-21},
{1e-6,1e-1,0.000001822923025350097857684},
{System.Math.Pow(0.5,5),1,0.0070733414923541296841667565874 },
@ -1033,7 +1033,20 @@ gammainc(mpf('1'),mpf('1'),mpf('inf'),regularized=True)
{double.PositiveInfinity,1,1 },
{double.Epsilon,0,1 },
};
CheckFunctionValues("GammaUpper", MMath.GammaUpper, gammaUpper_pairs);
CheckFunctionValues("GammaUpperRegularized", (a,x) => MMath.GammaUpper(a, x, true), gammaUpperRegularized_pairs);
/* In python mpmath:
from mpmath import *
mp.dps = 500
mp.pretty = True
gammainc(mpf('1'),mpf('1'),mpf('inf'),regularized=True)
*/
double[,] gammaUpper_pairs = new double[,] {
{1e-20,0.3,0.9056766516758467124267199175638778988963728798249333},
{1e-6,1e-1,1.8229219731321746872065707723366373632},
{2,1,0.73575888234288464319104754 },
};
CheckFunctionValues("GammaUpper", (a, x) => MMath.GammaUpper(a, x, false), gammaUpper_pairs);
}
[Fact]

Просмотреть файл

@ -4,11 +4,15 @@
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
using Xunit;
using Microsoft.ML.Probabilistic.Distributions;
using Microsoft.ML.Probabilistic.Math;
using Microsoft.ML.Probabilistic.Utilities;
using GaussianArray2D = Microsoft.ML.Probabilistic.Distributions.DistributionStructArray2D<Microsoft.ML.Probabilistic.Distributions.Gaussian, double>;
using System.Threading.Tasks;
using System.Threading;
namespace Microsoft.ML.Probabilistic.Tests
{
@ -51,23 +55,124 @@ namespace Microsoft.ML.Probabilistic.Tests
SamplingTest(a, 3);
}
[Fact]
public void GammaPowerFromMeanAndMeanLogTest()
{
double mean = 3;
double meanLog = 0.4;
double power = 5;
GammaPower gammaPower = GammaPower.FromMeanAndMeanLog(mean, meanLog, power);
Assert.Equal(mean, gammaPower.GetMean(), 1e-1);
Assert.Equal(meanLog, gammaPower.GetMeanLog(), 1e-1);
}
[Fact]
public void Gamma_GetMode_MaximizesGetLogProb()
{
Parallel.ForEach(OperatorTests.Gammas()/*.Take(100000)*/, gamma =>
{
double max = double.NegativeInfinity;
foreach (var x in OperatorTests.DoublesAtLeastZero())
{
double logProb = gamma.GetLogProb(x);
Assert.False(double.IsNaN(logProb));
if (logProb > max)
{
max = logProb;
}
}
double mode = gamma.GetMode();
Assert.False(double.IsNaN(mode));
double logProbBelowMode = gamma.GetLogProb(MMath.PreviousDouble(mode));
Assert.False(double.IsNaN(logProbBelowMode));
double logProbAboveMode = gamma.GetLogProb(MMath.NextDouble(mode));
Assert.False(double.IsNaN(logProbAboveMode));
double logProbAtMode = gamma.GetLogProb(mode);
Assert.False(double.IsNaN(logProbAtMode));
logProbAtMode = System.Math.Max(System.Math.Max(logProbAtMode, logProbAboveMode), logProbBelowMode);
const double smallestNormalized = 1e-308;
Assert.True(logProbAtMode >= max ||
MMath.AbsDiff(logProbAtMode, max, 1e-8) < 1e-4 ||
(mode == 0 && gamma.GetLogProb(smallestNormalized) >= max)
);
});
}
[Fact]
public void GammaPower_GetMode_MaximizesGetLogProb()
{
long count = 0;
Parallel.ForEach(new[] {
GammaPower.FromShapeAndRate(1.7976931348623157E+308, 1.7976931348623157E+308, -1.7976931348623157E+308),
}.Concat(OperatorTests.GammaPowers()).Take(100000), gammaPower =>
{
double argmax = double.NaN;
double max = double.NegativeInfinity;
foreach (var x in OperatorTests.DoublesAtLeastZero())
{
double logProb = gammaPower.GetLogProb(x);
Assert.False(double.IsNaN(logProb));
if (logProb > max)
{
max = logProb;
argmax = x;
}
}
double mode = gammaPower.GetMode();
Assert.False(double.IsNaN(mode));
double logProbBelowMode = gammaPower.GetLogProb(MMath.PreviousDouble(mode));
Assert.False(double.IsNaN(logProbBelowMode));
double logProbAboveMode = gammaPower.GetLogProb(MMath.NextDouble(mode));
Assert.False(double.IsNaN(logProbAboveMode));
double logProbAtMode = gammaPower.GetLogProb(mode);
Assert.False(double.IsNaN(logProbAtMode));
logProbAtMode = System.Math.Max(System.Math.Max(logProbAtMode, logProbAboveMode), logProbBelowMode);
const double smallestNormalized = 1e-308;
Assert.True(logProbAtMode >= max ||
MMath.AbsDiff(logProbAtMode, max, 1e-8) < 1e-8 ||
(mode <= double.Epsilon && gammaPower.GetLogProb(smallestNormalized) >= max)
);
Interlocked.Add(ref count, 1);
if(count % 100000 == 0)
Trace.WriteLine($"{count} cases passed");
});
Trace.WriteLine($"{count} cases passed");
}
[Fact]
public void GammaPowerTest()
{
foreach (var gammaPower in new[] {
GammaPower.FromShapeAndRate(3, 2, -4.0552419045546273),
new GammaPower(0.04591, 19.61, -1),
})
{
gammaPower.GetMeanAndVariance(out double mean, out double variance);
Assert.False(double.IsNaN(mean));
Assert.False(double.IsNaN(variance));
Assert.False(mean < 0);
Assert.False(variance < 0);
Assert.Equal(variance, gammaPower.GetVariance());
}
Assert.Equal(0, GammaPower.FromShapeAndRate(2, 0, -1).GetMean());
Assert.Equal(0, GammaPower.FromShapeAndRate(2, 0, -1).GetVariance());
Assert.True(GammaPower.FromShapeAndRate(2, double.PositiveInfinity, -1).IsPointMass);
GammaPower g = new GammaPower(1, 1, -1);
g.ToString();
Gamma gamma = new Gamma(1, 1);
double expectedProbLessThan = gamma.GetProbLessThan(2);
Assert.Equal(expectedProbLessThan, 1-g.GetProbLessThan(0.5), 1e-10);
Assert.Equal(expectedProbLessThan, 1 - g.GetProbLessThan(0.5), 1e-10);
Assert.Equal(2, gamma.GetQuantile(expectedProbLessThan), 1e-10);
Assert.Equal(0.5, g.GetQuantile(1 - expectedProbLessThan), 1e-10);
GammaPower(1);
GammaPower(-1);
GammaPower(2);
GammaPowerMomentTest(1);
GammaPowerMomentTest(-1);
GammaPowerMomentTest(2);
}
private void GammaPower(double power)
private void GammaPowerMomentTest(double power)
{
GammaPower g = new GammaPower(9.9, 1, power);
GammaPower g2 = new GammaPower(4.4, 3.3, power);
@ -80,6 +185,32 @@ namespace Microsoft.ML.Probabilistic.Tests
g.SetToUniform();
}
[Fact]
public void GammaPowerMeanAndVarianceFuzzTest()
{
foreach(var gammaPower in OperatorTests.GammaPowers().Take(100000))
{
gammaPower.GetMeanAndVariance(out double mean, out double variance);
Assert.False(double.IsNaN(mean));
Assert.False(double.IsNaN(variance));
Assert.False(mean < 0);
Assert.False(variance < 0);
}
}
[Fact]
public void GammaMeanAndVarianceFuzzTest()
{
foreach (var gamma in OperatorTests.Gammas())
{
gamma.GetMeanAndVariance(out double mean, out double variance);
Assert.False(double.IsNaN(mean));
Assert.False(double.IsNaN(variance));
Assert.False(mean < 0);
Assert.False(variance < 0);
}
}
//[Fact]
internal void WrappedGaussianTest()
{
@ -199,6 +330,24 @@ namespace Microsoft.ML.Probabilistic.Tests
}
}
/// <summary>
/// Checks that TruncatedGamma.GetMeanPower does not return infinity or NaN for proper distributions.
/// </summary>
[Fact]
public void TruncatedGamma_GetMeanPower()
{
double shape = 1;
TruncatedGamma g = new TruncatedGamma(shape, 1, 1, double.PositiveInfinity);
for (int i = 0; i < 100; i++)
{
var meanPower = g.GetMeanPower(-i);
Trace.WriteLine($"GetMeanPower({-i}) = {meanPower}");
Assert.False(double.IsNaN(meanPower));
Assert.False(double.IsInfinity(meanPower));
if (i == 1) Assert.Equal(MMath.GammaUpper(shape-1, 1, false)/MMath.GammaUpper(shape, 1, false), meanPower, 1e-8);
}
}
[Fact]
public void GaussianTest()
{
@ -266,10 +415,10 @@ namespace Microsoft.ML.Probabilistic.Tests
g.SetMeanAndPrecision(1e4, 1e306);
Assert.Equal(Gaussian.FromMeanAndPrecision(1e4, double.MaxValue / 1e4), g);
Assert.Equal(Gaussian.FromMeanAndPrecision(1e4, double.MaxValue/1e4), new Gaussian(1e4, 1E-306));
Assert.Equal(Gaussian.FromMeanAndPrecision(1e4, double.MaxValue / 1e4), new Gaussian(1e4, 1E-306));
Assert.Equal(Gaussian.PointMass(1e-155), new Gaussian(1e-155, 1E-312));
Gaussian.FromNatural(1, 1e-309).GetMeanAndVarianceImproper(out m, out v);
if(v > double.MaxValue)
if (v > double.MaxValue)
Assert.Equal(0, m);
Gaussian.Uniform().GetMeanAndVarianceImproper(out m, out v);
Assert.Equal(0, m);
@ -300,7 +449,7 @@ namespace Microsoft.ML.Probabilistic.Tests
{
GaussianSetToProduct_ProducesPointMass(Gaussian.FromMeanAndPrecision(1, double.MaxValue));
GaussianSetToProduct_ProducesPointMass(Gaussian.FromMeanAndPrecision(0.9, double.MaxValue));
GaussianSetToProduct_ProducesPointMass(Gaussian.FromMeanAndPrecision(10, double.MaxValue/10));
GaussianSetToProduct_ProducesPointMass(Gaussian.FromMeanAndPrecision(10, double.MaxValue / 10));
}
private void GaussianSetToProduct_ProducesPointMass(Gaussian g)
@ -317,8 +466,8 @@ namespace Microsoft.ML.Probabilistic.Tests
public void GammaSetToProduct_ProducesPointMassTest()
{
GammaSetToProduct_ProducesPointMass(Gamma.FromShapeAndRate(double.MaxValue, double.MaxValue));
GammaSetToProduct_ProducesPointMass(Gamma.FromShapeAndRate(double.MaxValue/10, double.MaxValue));
GammaSetToProduct_ProducesPointMass(Gamma.FromShapeAndRate(double.MaxValue, double.MaxValue/10));
GammaSetToProduct_ProducesPointMass(Gamma.FromShapeAndRate(double.MaxValue / 10, double.MaxValue));
GammaSetToProduct_ProducesPointMass(Gamma.FromShapeAndRate(double.MaxValue, double.MaxValue / 10));
}
private void GammaSetToProduct_ProducesPointMass(Gamma g)
@ -434,7 +583,7 @@ namespace Microsoft.ML.Probabilistic.Tests
PositiveDefiniteMatrix B = new PositiveDefiniteMatrix(4, 4);
B.SetToDiagonal(Vector.FromArray(1, 0, 3, 4));
VectorGaussianMoments vg1 = new VectorGaussianMoments(Vector.FromArray(6,5,4,3), A);
VectorGaussianMoments vg1 = new VectorGaussianMoments(Vector.FromArray(6, 5, 4, 3), A);
VectorGaussianMoments vg2 = new VectorGaussianMoments(Vector.FromArray(1, 2, 3, 4), B);
var product = vg1 * vg2;
@ -501,7 +650,7 @@ namespace Microsoft.ML.Probabilistic.Tests
Assert.Equal(Gamma.PointMass(0), Gamma.FromShapeAndScale(2.5, 1e-320));
Assert.Equal(Gamma.PointMass(0), new Gamma(2.5, 1e-320));
Assert.Equal(Gamma.PointMass(0), new Gamma(2.5, 0));
Assert.Equal(Gamma.PointMass(1e-300), Gamma.FromShapeAndRate(2, 1e300)^1e10);
Assert.Equal(Gamma.PointMass(1e-300), Gamma.FromShapeAndRate(2, 1e300) ^ 1e10);
ProductWithUniformTest(g);
Gamma g2 = new Gamma();
@ -1484,25 +1633,29 @@ namespace Microsoft.ML.Probabilistic.Tests
[Fact]
public void GammaSetToRatioProperTest()
{
Gamma numerator = Gamma.FromShapeAndRate(4, 5);
Gamma denominator = Gamma.FromShapeAndRate(6, 3);
Gamma r = new Gamma();
r.SetToRatio(numerator, denominator, true);
Console.WriteLine("ratio: {0}", r);
Console.WriteLine("ratio*denom: {0} (numerator was {1})", r * denominator, numerator);
Assert.True(MMath.AbsDiff((r * denominator).GetMean(), numerator.GetMean(), 1e-8) < 1e-10);
denominator.SetShapeAndRate(6, 7);
r.SetToRatio(numerator, denominator, true);
Console.WriteLine("ratio: {0}", r);
Console.WriteLine("ratio*denom: {0} (numerator was {1})", r * denominator, numerator);
Assert.True(MMath.AbsDiff((r * denominator).GetMean(), numerator.GetMean(), 1e-8) < 1e-10);
denominator.SetShapeAndRate(3, 7);
r.SetToRatio(numerator, denominator, true);
Console.WriteLine("ratio: {0}", r);
Console.WriteLine("ratio*denom: {0} (numerator was {1})", r * denominator, numerator);
Assert.True(MMath.AbsDiff((r * denominator).GetMean(), numerator.GetMean(), 1e-8) < 1e-10);
foreach (Gamma numerator in new[] {
Gamma.FromShapeAndRate(0.5, 0.5),
Gamma.FromShapeAndRate(4, 5),
})
{
foreach (var denominator in new[]
{
Gamma.Uniform(),
Gamma.FromShapeAndRate(6, 3),
Gamma.FromShapeAndRate(6,7),
Gamma.FromShapeAndRate(3,7)
})
{
Gamma r = new Gamma();
r.SetToRatio(numerator, denominator, true);
//Trace.WriteLine($"ratio: {r} ratio*denom: {r * denominator} (numerator was {numerator})");
Assert.True(r.Shape >= 1);
Assert.True(r.Rate >= 0);
Assert.True(MMath.AbsDiff((r * denominator).GetMean(), numerator.GetMean(), 1e-8) < 1e-10);
// It is counter-intuitive that uniform denominator doesn't return numerator.
//if (denominator.IsUniform()) Assert.Equal(numerator, r);
}
}
}
[Fact]
@ -1510,24 +1663,18 @@ namespace Microsoft.ML.Probabilistic.Tests
{
int dim = 1;
Wishart numerator = Wishart.FromShapeAndRate(4, PositiveDefiniteMatrix.IdentityScaledBy(dim, 5));
Wishart denominator = Wishart.FromShapeAndRate(6, PositiveDefiniteMatrix.IdentityScaledBy(dim, 3));
Wishart r = new Wishart(dim);
r.SetToRatio(numerator, denominator, true);
Console.WriteLine("ratio: {0}", r);
Console.WriteLine("ratio*denom: {0} (numerator was {1})", r * denominator, numerator);
Assert.True((r * denominator).GetMean().MaxDiff(numerator.GetMean()) < 1e-10);
denominator.SetShapeAndRate(6, PositiveDefiniteMatrix.IdentityScaledBy(dim, 7));
r.SetToRatio(numerator, denominator, true);
Console.WriteLine("ratio: {0}", r);
Console.WriteLine("ratio*denom: {0} (numerator was {1})", r * denominator, numerator);
Assert.True((r * denominator).GetMean().MaxDiff(numerator.GetMean()) < 1e-10);
denominator.SetShapeAndRate(3, PositiveDefiniteMatrix.IdentityScaledBy(dim, 7));
r.SetToRatio(numerator, denominator, true);
Console.WriteLine("ratio: {0}", r);
Console.WriteLine("ratio*denom: {0} (numerator was {1})", r * denominator, numerator);
Assert.True((r * denominator).GetMean().MaxDiff(numerator.GetMean()) < 1e-10);
foreach (Wishart denominator in new[] {
Wishart.FromShapeAndRate(6, PositiveDefiniteMatrix.IdentityScaledBy(dim, 3)),
Wishart.FromShapeAndRate(6, PositiveDefiniteMatrix.IdentityScaledBy(dim, 7)),
Wishart.FromShapeAndRate(3, PositiveDefiniteMatrix.IdentityScaledBy(dim, 7)),
})
{
Wishart r = new Wishart(dim);
r.SetToRatio(numerator, denominator, true);
//Trace.WriteLine($"ratio: {r} ratio*denom: {r * denominator} (numerator was {numerator})");
Assert.True(r.Shape >= (dim + 1) / 2.0);
Assert.True((r * denominator).GetMean().MaxDiff(numerator.GetMean()) < 1e-10);
}
}
[Fact]
@ -1579,7 +1726,7 @@ namespace Microsoft.ML.Probabilistic.Tests
stringDistributionTest(dist1, dist2);
stringDistributionTest(dist1, dist3);
stringDistributionTest(dist2, dist3);
StringDistributionPointMassTest(dist1, "ab");
StringDistributionPointMassTest(dist2, "Abc");
StringDistributionPointMassTest(dist3, "ABcd");

Просмотреть файл

@ -20,6 +20,7 @@ using Assert = Xunit.Assert;
using Microsoft.ML.Probabilistic.Algorithms;
using Microsoft.ML.Probabilistic.Models.Attributes;
using Microsoft.ML.Probabilistic.Serialization;
using Microsoft.ML.Probabilistic.Compiler;
namespace Microsoft.ML.Probabilistic.Tests
{
@ -37,6 +38,168 @@ namespace Microsoft.ML.Probabilistic.Tests
return Util.ArrayInit(count, i => (min + i * inc));
}
[Fact]
public void GammaPower_ReturnsShapeGreaterThan1()
{
Variable<TruncatedGamma> xPriorVar = Variable.Observed(default(TruncatedGamma)).Named("xPrior");
Variable<double> x = Variable<double>.Random(xPriorVar).Named("x");
Variable<double> power = Variable.Observed(0.5).Named("power");
var y = x ^ power;
y.Name = nameof(y);
Variable<Gamma> yLikeVar = Variable.Observed(default(Gamma)).Named("yLike");
Variable.ConstrainEqualRandom(y, yLikeVar);
y.SetMarginalPrototype(yLikeVar);
InferenceEngine engine = new InferenceEngine();
foreach(var powerValue in linspace(1, 10, 10))
{
TruncatedGamma xPrior = new TruncatedGamma(Gamma.FromShapeAndRate(3, 3), 1, double.PositiveInfinity);
xPriorVar.ObservedValue = xPrior;
Gamma yLike = Gamma.Uniform();
yLikeVar.ObservedValue = yLike;
power.ObservedValue = powerValue;
var yActual = engine.Infer<Gamma>(y);
// Importance sampling
GammaEstimator xEstimator = new GammaEstimator();
GammaEstimator yEstimator = new GammaEstimator();
MeanVarianceAccumulator mva = new MeanVarianceAccumulator();
int nSamples = 1000000;
for (int i = 0; i < nSamples; i++)
{
double xSample = xPrior.Sample();
double ySample = System.Math.Pow(xSample, power.ObservedValue);
double logWeight = yLike.GetLogProb(ySample);
double weight = System.Math.Exp(logWeight);
xEstimator.Add(xSample, weight);
yEstimator.Add(ySample, weight);
mva.Add(1/ySample, weight);
}
Gamma xExpected = xEstimator.GetDistribution(new Gamma());
Gamma yExpected = yEstimator.GetDistribution(yLike);
double yActualMeanInverse = yActual.GetMeanPower(-1);
double meanInverseError = MMath.AbsDiff(mva.Mean, yActualMeanInverse, 1e-8);
Trace.WriteLine($"power = {powerValue}: y = {yActual}[E^-1={yActual.GetMeanPower(-1)}] should be {yExpected}[E^-1={mva.Mean}], error = {meanInverseError}");
Assert.True(yActual.Shape > 1);
Assert.True(MMath.AbsDiff(yExpected.GetMean(), yActual.GetMean(), 1e-8) < 1);
Assert.True(meanInverseError < 1e-2);
}
}
[Fact]
public void GammaPowerPower_ReturnsShapeGreaterThan2()
{
Variable<GammaPower> xPriorVar = Variable.Observed(default(GammaPower)).Named("xPrior");
Variable<double> x = Variable<double>.Random(xPriorVar).Named("x");
Variable<double> power = Variable.Observed(0.5).Named("power");
var y = x ^ power;
y.Name = nameof(y);
Variable<GammaPower> yLikeVar = Variable.Observed(default(GammaPower)).Named("yLike");
Variable.ConstrainEqualRandom(y, yLikeVar);
y.SetMarginalPrototype(yLikeVar);
InferenceEngine engine = new InferenceEngine();
foreach (var powerValue in linspace(1, 10, 10))
{
GammaPower xPrior = new GammaPower(3, 3, 1);
xPriorVar.ObservedValue = xPrior;
GammaPower yLike = GammaPower.Uniform(-1);
yLikeVar.ObservedValue = yLike;
power.ObservedValue = powerValue;
var yActual = engine.Infer<GammaPower>(y);
// Importance sampling
GammaEstimator xEstimator = new GammaEstimator();
GammaEstimator yEstimator = new GammaEstimator();
MeanVarianceAccumulator mva = new MeanVarianceAccumulator();
int nSamples = 1000000;
for (int i = 0; i < nSamples; i++)
{
double xSample = xPrior.Sample();
double ySample = System.Math.Pow(xSample, power.ObservedValue);
double logWeight = yLike.GetLogProb(ySample);
double weight = System.Math.Exp(logWeight);
xEstimator.Add(xSample, weight);
yEstimator.Add(ySample, weight);
mva.Add(1 / ySample, weight);
}
Gamma xExpected = xEstimator.GetDistribution(new Gamma());
Gamma yExpected = yEstimator.GetDistribution(new Gamma());
double yActualMeanInverse = yActual.GetMeanPower(-1);
double meanInverseError = MMath.AbsDiff(mva.Mean, yActualMeanInverse, 1e-8);
Trace.WriteLine($"power = {powerValue}: y = {yActual}[E^-1={yActualMeanInverse}] should be {yExpected}[E^-1={mva.Mean}], error = {meanInverseError}");
Assert.True(yActual.Shape > 2);
Assert.True(MMath.AbsDiff(yExpected.GetMean(), yActual.GetMean(), 1e-8) < 1);
//Assert.True(meanInverseError < 10);
}
}
[Fact]
public void GammaPowerPowerTest()
{
Assert.False(double.IsNaN(PowerOp.GammaPowerFromDifferentPower(new GammaPower(1.333, 1.5, 1), 0.01).Shape));
Assert.True(PowerOp.XAverageConditional(new GammaPower(7, 0.1111, -1), new GammaPower(16.19, 0.06154, 1), 2.2204460492503136E-16, GammaPower.Uniform(1)).IsProper());
Assert.True(PowerOp.PowAverageConditional(GammaPower.FromShapeAndRate(9.0744065303642287, 8.7298765698182414, 1), 1.6327904641199278, GammaPower.Uniform(-1)).IsProper());
Assert.False(PowerOp.XAverageConditional(GammaPower.Uniform(-1), GammaPower.FromShapeAndRate(1, 1, 1), 4.0552419045546273, GammaPower.Uniform(1)).IsPointMass);
Variable<GammaPower> xPriorVar = Variable.Observed(default(GammaPower)).Named("xPrior");
Variable<double> x = Variable<double>.Random(xPriorVar).Named("x");
Variable<double> power = Variable.Observed(0.5).Named("power");
var y = x ^ power;
y.Name = nameof(y);
Variable<GammaPower> yLikeVar = Variable.Observed(default(GammaPower)).Named("yLike");
Variable.ConstrainEqualRandom(y, yLikeVar);
y.SetMarginalPrototype(yLikeVar);
InferenceEngine engine = new InferenceEngine();
foreach (var (xPower, yPower) in new[]
{
(-1, -1),
(-1, -0.5),
(1, 1),
(2, 2)
})
{
var xPrior = GammaPower.FromShapeAndRate(2, 3, xPower);
xPriorVar.ObservedValue = xPrior;
GammaPower yLike = GammaPower.FromShapeAndRate(1, 0.5, yPower);
yLikeVar.ObservedValue = yLike;
// Importance sampling
GammaPowerEstimator xEstimator = new GammaPowerEstimator(xPrior.Power);
GammaPowerEstimator yEstimator = new GammaPowerEstimator(yLike.Power);
MeanVarianceAccumulator yMva = new MeanVarianceAccumulator();
int nSamples = 1000000;
Rand.Restart(0);
for (int i = 0; i < nSamples; i++)
{
double xSample = xPrior.Sample();
double ySample = System.Math.Pow(xSample, power.ObservedValue);
if (double.IsNaN(ySample) || double.IsInfinity(ySample)) throw new Exception();
double logWeight = yLike.GetLogProb(ySample);
if (double.IsNaN(logWeight)) throw new Exception();
double weight = System.Math.Exp(logWeight);
xEstimator.Add(xSample, weight);
yEstimator.Add(ySample, weight);
yMva.Add(ySample, weight);
}
GammaPower xExpected = xEstimator.GetDistribution(xPrior);
GammaPower yExpected = yEstimator.GetDistribution(yLike);
if(yLike.Power == -1)
yExpected = GammaPower.FromMeanAndVariance(yMva.Mean, yMva.Variance, yLike.Power);
var xActual = engine.Infer<GammaPower>(x);
double xError = MMath.AbsDiff(xActual.GetMean(), xExpected.GetMean(), 1e-6);
Trace.WriteLine($"x = {xActual} should be {xExpected}, error = {xError}");
var yActual = engine.Infer<GammaPower>(y);
double yError = MMath.AbsDiff(yActual.GetMean(), yExpected.GetMean(), 1e-6);
Trace.WriteLine($"y = {yActual} should be {yExpected}, error = {yError}");
double tolerance = 1e-1;
Assert.True(xError < tolerance);
Assert.True(yError < tolerance);
}
}
[Fact]
public void VectorTimesScalarTest()
{
@ -1540,55 +1703,217 @@ namespace Microsoft.ML.Probabilistic.Tests
}
[Fact]
[Trait("Category", "ModifiesGlobals")]
public void GammaProductRRRTest()
{
Variable<bool> evidence = Variable.Bernoulli(0.5).Named("evidence");
IfBlock block = Variable.If(evidence);
Variable<double> shape = Variable.Observed(2.5).Named("shape");
Gamma scalePrior = Gamma.FromShapeAndRate(3, 4);
Variable<double> scale = Variable<double>.Random(scalePrior).Named("scale");
Variable<double> y = Variable.GammaFromShapeAndRate(shape, 1).Named("y");
Variable<double> x = (y * scale).Named("x");
Gamma xPrior = Gamma.FromShapeAndRate(5, 6);
Variable.ConstrainEqualRandom(x, xPrior);
Variable<Gamma> bPriorVar = Variable.Observed(default(Gamma)).Named("bPrior");
Variable<double> b = Variable<double>.Random(bPriorVar).Named("b");
Variable<Gamma> aPriorVar = Variable.Observed(default(Gamma)).Named("aPrior");
Variable<double> a = Variable<double>.Random(aPriorVar).Named("a");
Variable<double> product = (a * b).Named("product");
Variable<Gamma> productPriorVar = Variable.Observed(default(Gamma)).Named("productPrior");
Variable.ConstrainEqualRandom(product, productPriorVar);
block.CloseBlock();
InferenceEngine engine = new InferenceEngine();
Gamma scaleActual = engine.Infer<Gamma>(scale);
Gamma yActual = engine.Infer<Gamma>(y);
double evActual = engine.Infer<Bernoulli>(evidence).LogOdds;
Gamma scaleExpected = new Gamma(3.335, 0.1678);
Gamma yExpected = new Gamma(3.021, 0.5778);
double evExpected = -0.919181055678219;
if (false)
var groundTruthArray = new[]
{
// importance sampling
double totalWeight = 0;
Gamma yPrior = Gamma.FromShapeAndRate(shape.ObservedValue, 1);
GammaEstimator scaleEst = new GammaEstimator();
GammaEstimator yEst = new GammaEstimator();
int numIter = 1000000;
for (int iter = 0; iter < numIter; iter++)
((Gamma.FromShapeAndRate(1, 2), Gamma.FromShapeAndRate(10, 10), Gamma.FromShapeAndRate(101, double.MaxValue)),
(Gamma.PointMass(0), Gamma.FromShapeAndScale(9, 0.1), Gamma.PointMass(5.6183114927306835E-307), 0.79850769622135)),
((Gamma.FromShapeAndRate(1, 2), Gamma.FromShapeAndRate(10, 10), Gamma.FromShapeAndRate(101, double.PositiveInfinity)),
(Gamma.PointMass(0), Gamma.PointMass(0), Gamma.FromShapeAndRate(101, double.PositiveInfinity), double.NegativeInfinity)),
((Gamma.FromShapeAndRate(1, 1), Gamma.FromShapeAndRate(1, 1), Gamma.Uniform()),
(Gamma.FromShapeAndRate(1, 1), Gamma.FromShapeAndRate(1, 1), new Gamma(0.3332, 3), 0)),
((Gamma.FromShapeAndRate(1, 1), Gamma.FromShapeAndRate(1, 1), Gamma.FromShapeAndRate(30, 1)),
(Gamma.FromShapeAndRate(9.31900740051435, 1.7964933711493432), Gamma.FromShapeAndRate(9.2434278607621678, 1.7794585918328409), Gamma.FromShapeAndRate(26.815751741761741, 1.0848182432245914), -10.6738675986344)),
((Gamma.FromShapeAndRate(3, 4), Gamma.FromShapeAndRate(2.5, 1), Gamma.FromShapeAndRate(5, 6)),
(new Gamma(3.335, 0.1678), new Gamma(3.021, 0.5778), new Gamma(5.619, 0.1411), -0.919181055678219)),
((Gamma.FromShapeAndRate(3, 4), Gamma.FromShapeAndRate(2.5, 1), Gamma.Uniform()),
(Gamma.FromShapeAndRate(3, 4), Gamma.FromShapeAndRate(2.5, 1), new Gamma(1.154, 1.625), 0.0)),
};
using (TestUtils.TemporarilyAllowGammaImproperProducts)
{
foreach (var groundTruth in groundTruthArray)
{
double scaleSample = scalePrior.Sample();
double ySample = yPrior.Sample();
double logWeight = xPrior.GetLogProb(ySample * scaleSample);
double weight = System.Math.Exp(logWeight);
totalWeight += weight;
scaleEst.Add(scaleSample, weight);
yEst.Add(ySample, weight);
var (bPrior, aPrior, productPrior) = groundTruth.Item1;
var (bExpected, aExpected, productExpected, evExpected) = groundTruth.Item2;
bPriorVar.ObservedValue = bPrior;
aPriorVar.ObservedValue = aPrior;
productPriorVar.ObservedValue = productPrior;
Gamma bActual = engine.Infer<Gamma>(b);
Gamma aActual = engine.Infer<Gamma>(a);
Gamma productActual = engine.Infer<Gamma>(product);
double evActual = engine.Infer<Bernoulli>(evidence).LogOdds;
if (false)
{
// importance sampling
double totalWeight = 0;
GammaEstimator bEst = new GammaEstimator();
GammaEstimator aEst = new GammaEstimator();
GammaEstimator productEst = new GammaEstimator();
int numIter = 1000000;
for (int iter = 0; iter < numIter; iter++)
{
double bSample = bPrior.Sample();
double aSample = aPrior.Sample();
double productSample = aSample * bSample;
double logWeight = productPrior.GetLogProb(productSample);
double weight = System.Math.Exp(logWeight);
totalWeight += weight;
bEst.Add(bSample, weight);
aEst.Add(aSample, weight);
productEst.Add(productSample, weight);
}
bExpected = bEst.GetDistribution(new Gamma());
evExpected = System.Math.Log(totalWeight / numIter);
aExpected = aEst.GetDistribution(new Gamma());
productExpected = productEst.GetDistribution(new Gamma());
}
double bError = bExpected.MaxDiff(bActual);
double aError = aExpected.MaxDiff(aActual);
double productError = productExpected.MaxDiff(productActual);
double evError = MMath.AbsDiff(evExpected, evActual, 1e-6);
bool trace = false;
if (trace)
{
Trace.WriteLine($"b = {bActual} should be {bExpected}, error = {bError}");
Trace.WriteLine($"a = {aActual}[variance={aActual.GetVariance()}] should be {aExpected}[variance={aExpected.GetVariance()}], error = {aError}");
Trace.WriteLine($"product = {productActual} should be {productExpected}, error = {productError}");
Trace.WriteLine($"evidence = {evActual} should be {evExpected}, error = {evError}");
}
Assert.True(bError < 3e-2);
Assert.True(aError < 1);
Assert.True(productError < 3e-2);
Assert.True(evError < 5e-2);
}
}
}
[Fact]
[Trait("Category", "ModifiesGlobals")]
public void GammaPowerProductRRRTest()
{
Variable<bool> evidence = Variable.Bernoulli(0.5).Named("evidence");
IfBlock block = Variable.If(evidence);
Variable<GammaPower> bPriorVar = Variable.Observed(default(GammaPower)).Named("bPrior");
Variable<double> b = Variable<double>.Random(bPriorVar).Named("b");
Variable<GammaPower> aPriorVar = Variable.Observed(default(GammaPower)).Named("aPrior");
Variable<double> a = Variable<double>.Random(aPriorVar).Named("a");
Variable<double> product = (a * b).Named("product");
Variable<GammaPower> productPriorVar = Variable.Observed(default(GammaPower)).Named("productPrior");
Variable.ConstrainEqualRandom(product, productPriorVar);
block.CloseBlock();
InferenceEngine engine = new InferenceEngine();
var groundTruthArray = new[]
{
((GammaPower.FromShapeAndRate(1, 2, 1), GammaPower.FromShapeAndRate(10, 10, 1), GammaPower.FromShapeAndRate(101, double.MaxValue, 1)),
(GammaPower.PointMass(0, 1.0), GammaPower.FromShapeAndScale(9, 0.1, 1), GammaPower.PointMass(5.6183114927306835E-307, 1), 0.79850769622135)),
((GammaPower.FromShapeAndRate(1, 2, 1), GammaPower.FromShapeAndRate(10, 10, 1), GammaPower.FromShapeAndRate(101, double.PositiveInfinity, 1)),
(GammaPower.PointMass(0, 1.0), GammaPower.PointMass(0, 1.0), GammaPower.FromShapeAndRate(101, double.PositiveInfinity, 1), double.NegativeInfinity)),
((GammaPower.FromShapeAndRate(2.25, 0.625, -1), GammaPower.FromShapeAndRate(100000002, 100000001, -1), GammaPower.PointMass(5, -1)),
(GammaPower.FromShapeAndRate(99999999.000000119, 19999999.375000019, -1.0), GammaPower.FromShapeAndRate(100000000.0, 100000001.125, -1.0), GammaPower.PointMass(5, -1), -6.5380532346178)),
((GammaPower.FromShapeAndRate(2.25, 0.625, -1), GammaPower.FromShapeAndRate(100000002, 100000001, -1), GammaPower.PointMass(0, -1)),
(GammaPower.PointMass(0, -1.0), GammaPower.PointMass(0, -1.0), GammaPower.PointMass(0, -1), double.NegativeInfinity)),
((GammaPower.FromShapeAndRate(0.83228652924877289, 0.31928405884349487, -1), GammaPower.FromShapeAndRate(1.7184321234630087, 0.709692740551586, -1), GammaPower.FromShapeAndRate(491, 1583.0722891566263, -1)),
(GammaPower.FromShapeAndRate(3.1727695744145481, 10.454478169320565, -1.0), GammaPower.FromShapeAndRate(2.469020042117986, 2.5421356314915293, -1.0), GammaPower.FromShapeAndRate(495.57371802470414, 1592.4685605878328, -1.0), -3.57744782716672)),
((GammaPower.FromShapeAndRate(1, 1, 1), GammaPower.FromShapeAndRate(1, 1, 1), GammaPower.Uniform(1)),
(GammaPower.FromShapeAndRate(1, 1, 1), GammaPower.FromShapeAndRate(1, 1, 1), new GammaPower(0.3332, 3, 1), 0)),
((GammaPower.FromShapeAndRate(1, 1, 1), GammaPower.FromShapeAndRate(1, 1, 1), GammaPower.FromShapeAndRate(30, 1, 1)),
(GammaPower.FromShapeAndRate(9.31900740051435, 1.7964933711493432, 1.0), GammaPower.FromShapeAndRate(9.2434278607621678, 1.7794585918328409, 1.0), GammaPower.FromShapeAndRate(26.815751741761741, 1.0848182432245914, 1.0), -10.6738675986344)),
((GammaPower.FromShapeAndRate(3, 1, -1), GammaPower.FromShapeAndRate(4, 1, -1), GammaPower.Uniform(-1)),
(GammaPower.FromShapeAndRate(3, 1, -1), GammaPower.FromShapeAndRate(4, 1, -1), GammaPower.FromShapeAndRate(2.508893738311099, 0.25145071304806094, -1.0), 0)),
((GammaPower.FromShapeAndRate(1, 1, -1), GammaPower.FromShapeAndRate(1, 1, -1), GammaPower.Uniform(-1)),
(GammaPower.FromShapeAndRate(1, 1, -1), GammaPower.FromShapeAndRate(1, 1, -1), new GammaPower(0.3332, 3, -1), 0)),
((GammaPower.FromShapeAndRate(1, 1, -1), GammaPower.FromShapeAndRate(1, 1, -1), GammaPower.FromShapeAndRate(30, 1, -1)),
(GammaPower.FromShapeAndRate(11.057594449558747, 2.0731054100295871, -1.0), GammaPower.FromShapeAndRate(11.213079710986863, 2.1031756133562678, -1.0), GammaPower.FromShapeAndRate(28.815751741667615, 1.0848182432207041, -1.0), -4.22210057295786)),
((GammaPower.FromShapeAndRate(1, 1, 2), GammaPower.FromShapeAndRate(1, 1, 2), GammaPower.Uniform(2)),
(GammaPower.FromShapeAndRate(1, 1, 2), GammaPower.FromShapeAndRate(1, 1, 2), GammaPower.FromShapeAndRate(0.16538410345846666, 0.219449497990138, 2.0), 0)),
((GammaPower.FromShapeAndRate(1, 1, 2), GammaPower.FromShapeAndRate(1, 1, 2), GammaPower.FromShapeAndRate(30, 1, 2)),
(GammaPower.FromShapeAndRate(8.72865708291647, 1.71734403810018, 2.0), GammaPower.FromShapeAndRate(8.5298603954575931, 1.6767026737490067, 2.0), GammaPower.FromShapeAndRate(25.831187278202215, 1.0852321896648485, 2.0), -14.5369973268808)),
};
using (TestUtils.TemporarilyAllowGammaImproperProducts)
{
foreach (var groundTruth in groundTruthArray)
{
var (bPrior, aPrior, productPrior) = groundTruth.Item1;
var (bExpected, aExpected, productExpected, evExpected) = groundTruth.Item2;
bPriorVar.ObservedValue = bPrior;
aPriorVar.ObservedValue = aPrior;
productPriorVar.ObservedValue = productPrior;
GammaPower bActual = engine.Infer<GammaPower>(b);
GammaPower aActual = engine.Infer<GammaPower>(a);
GammaPower productActual = engine.Infer<GammaPower>(product);
double evActual = engine.Infer<Bernoulli>(evidence).LogOdds;
if (false)
{
// importance sampling
Rand.Restart(0);
double totalWeight = 0;
GammaPowerEstimator bEstimator = new GammaPowerEstimator(bPrior.Power);
GammaPowerEstimator aEstimator = new GammaPowerEstimator(aPrior.Power);
GammaPowerEstimator productEstimator = new GammaPowerEstimator(productPrior.Power);
MeanVarianceAccumulator bMva = new MeanVarianceAccumulator();
MeanVarianceAccumulator aMva = new MeanVarianceAccumulator();
MeanVarianceAccumulator productMva = new MeanVarianceAccumulator();
int numIter = 10000000;
for (int iter = 0; iter < numIter; iter++)
{
if (iter % 1000000 == 0) Trace.WriteLine($"iter = {iter}");
double bSample = bPrior.Sample();
double aSample = aPrior.Sample();
if(productPrior.Rate > 1e100)
{
bSample = 0;
aSample = 0;
}
double productSample = aSample * bSample;
double logWeight = productPrior.GetLogProb(productSample);
double weight = System.Math.Exp(logWeight);
totalWeight += weight;
bEstimator.Add(bSample, weight);
aEstimator.Add(aSample, weight);
productEstimator.Add(productSample, weight);
bMva.Add(bSample, weight);
aMva.Add(aSample, weight);
productMva.Add(productSample, weight);
}
Trace.WriteLine($"totalWeight = {totalWeight}");
evExpected = System.Math.Log(totalWeight / numIter);
bExpected = bEstimator.GetDistribution(bPrior);
aExpected = aEstimator.GetDistribution(aPrior);
productExpected = productEstimator.GetDistribution(productPrior);
bExpected = GammaPower.FromMeanAndVariance(bMva.Mean, bMva.Variance, bPrior.Power);
aExpected = GammaPower.FromMeanAndVariance(aMva.Mean, aMva.Variance, aPrior.Power);
productExpected = GammaPower.FromMeanAndVariance(productMva.Mean, productMva.Variance, productPrior.Power);
Trace.WriteLine($"{Quoter.Quote(bExpected)}, {Quoter.Quote(aExpected)}, {Quoter.Quote(productExpected)}, {evExpected}");
}
double bError = bExpected.MaxDiff(bActual);
double aError = aExpected.MaxDiff(aActual);
double productError = productExpected.MaxDiff(productActual);
double evError = MMath.AbsDiff(evExpected, evActual, 1e-6);
bool trace = false;
if (trace)
{
Trace.WriteLine($"b = {bActual} should be {bExpected}, error = {bError}");
Trace.WriteLine($"a = {aActual}[variance={aActual.GetVariance()}] should be {aExpected}[variance={aExpected.GetVariance()}], error = {aError}");
Trace.WriteLine($"product = {productActual} should be {productExpected}, error = {productError}");
Trace.WriteLine($"evidence = {evActual} should be {evExpected}, error = {evError}");
}
Assert.True(bError < 10);
Assert.True(aError < 2);
Assert.True(productError < 9);
Assert.True(evError < 3e-2);
}
scaleExpected = scaleEst.GetDistribution(new Gamma());
evExpected = System.Math.Log(totalWeight / numIter);
yExpected = yEst.GetDistribution(new Gamma());
}
Console.WriteLine("scale = {0} should be {1}", scaleActual, scaleExpected);
Console.WriteLine("y = {0} should be {1}", yActual, yExpected);
Console.WriteLine("evidence = {0} should be {1}", evActual, evExpected);
Assert.True(scaleExpected.MaxDiff(scaleActual) < 2e-2);
Assert.True(yExpected.MaxDiff(yActual) < 0.5);
Assert.True(MMath.AbsDiff(evExpected, evActual, 1e-6) < 1e-1);
}
[Fact]

Просмотреть файл

@ -5193,6 +5193,45 @@ namespace Microsoft.ML.Probabilistic.Tests
InferNet.Infer(b, nameof(b));
}
[Fact]
[Trait("Category", "CsoftModel")]
public void GatedGammaPowerProductRRCTest()
{
double priorB = 0.1;
double meanX = 0.4;
double meanLogX = 1.4;
double y = 0.3;
double power = -0.5;
GammaPower priorZ = new GammaPower(0.2, 1.2, power);
GammaPower priorX = GammaPower.FromMeanAndMeanLog(meanX, meanLogX, power);
InferenceEngine engine = new InferenceEngine();
engine.Compiler.DeclarationProvider = Microsoft.ML.Probabilistic.Compiler.RoslynDeclarationProvider.Instance;
var ca = engine.Compiler.Compile(GatedGammaPowerProductRRCModel, priorB, priorX, y, priorZ);
ca.Execute(20);
GammaPower xy = GammaPower.FromMeanAndMeanLog(meanX * y, meanLogX + System.Math.Log(y), power);
double sumCondT = System.Math.Exp(priorZ.GetLogAverageOf(xy));
double Z = priorB * sumCondT + (1 - priorB);
double postB = priorB * sumCondT / Z;
Bernoulli bDist = ca.Marginal<Bernoulli>("b");
Console.WriteLine("b = {0} (should be {1})", bDist, postB);
Assert.True(System.Math.Abs(bDist.GetProbTrue() - postB) < 1e-4);
}
private void GatedGammaPowerProductRRCModel(double priorB, GammaPower priorX, double y, GammaPower priorZ)
{
bool b = Factor.Bernoulli(priorB);
double x = Factor.Random(priorX);
if (b)
{
double z = Factor.Product(x, y);
Constrain.EqualRandom(z, priorZ);
}
InferNet.Infer(b, nameof(b));
}
[Fact]
public void GatedGammaProductCRCTest()
{
@ -5232,6 +5271,46 @@ namespace Microsoft.ML.Probabilistic.Tests
}
}
[Fact]
public void GatedGammaPowerProductCRCTest()
{
Variable<bool> evidence = Variable.Bernoulli(0.5).Named("evidence");
IfBlock block = Variable.If(evidence);
double shape = 2.5;
double rate = 1;
double power = -0.5;
Variable<double> scale = Variable.Observed(0.0);
Variable<double> y = Variable.Random(GammaPower.FromShapeAndRate(shape, rate, power)).Named("y");
Variable<double> x = (y * scale).Named("x");
x.ObservedValue = 0.0;
block.CloseBlock();
InferenceEngine engine = new InferenceEngine();
foreach (var alg in new IAlgorithm[] { new ExpectationPropagation(), new VariationalMessagePassing() })
{
engine.Algorithm = alg;
foreach (var xObserved in new[] { 0.0, 2.0 })
{
x.ObservedValue = xObserved;
scale.ObservedValue = xObserved;
GammaPower yActual = engine.Infer<GammaPower>(y);
double evActual = engine.Infer<Bernoulli>(evidence).LogOdds;
GammaPower yExpected = GammaPower.FromShapeAndRate(shape, rate, power);
double evExpected = 0;
if (xObserved != 0)
{
evExpected = GammaPower.FromShapeAndRate(shape, rate / System.Math.Pow(scale.ObservedValue, 1/power), power).GetLogProb(x.ObservedValue);
yExpected = GammaPower.PointMass(1, power);
}
Console.WriteLine("y = {0} should be {1}", yActual, yExpected);
Console.WriteLine("evidence = {0} should be {1}", evActual, evExpected);
Assert.True(yExpected.MaxDiff(yActual) < 1e-8);
Assert.True(MMath.AbsDiff(evExpected, evActual, 1e-6) < 1e-8);
}
}
}
[Fact]
public void GatedGammaRatioCRCTest()
{

Просмотреть файл

@ -13,6 +13,7 @@ using Microsoft.ML.Probabilistic.Math;
namespace Microsoft.ML.Probabilistic.Tests
{
using System.Diagnostics;
using System.Threading;
using System.Threading.Tasks;
using Utilities;
using Assert = Microsoft.ML.Probabilistic.Tests.AssertHelper;
@ -22,6 +23,61 @@ namespace Microsoft.ML.Probabilistic.Tests
public class OperatorTests
{
[Fact]
public void PlusGammaOpTest()
{
long count = 0;
Parallel.ForEach(new[] {
GammaPower.FromShapeAndRate(double.MaxValue, double.MaxValue, 100000000000000.0),
}.Concat(GammaPowers()).Where(g => g.Power != 0 && g.Shape > 1).Take(100000), gammaPower =>
{
Assert.True(gammaPower.IsPointMass || gammaPower.IsProper());
GammaPower gammaPower1 = GammaPower.FromShapeAndRate(gammaPower.Shape, double.Epsilon, gammaPower.Power);
GammaPower gammaPower2 = GammaPower.FromShapeAndRate(gammaPower.Shape, double.MaxValue, gammaPower.Power);
double mean1 = gammaPower1.GetMean();
double mean2 = gammaPower2.GetMean();
double largestMean = System.Math.Max(mean1, mean2);
double smallestMean = System.Math.Min(mean1, mean2);
double mode1 = gammaPower1.GetMode();
double mode2 = gammaPower2.GetMode();
double largestMode = System.Math.Max(mode1, mode2);
double smallestMode = System.Math.Min(mode1, mode2);
foreach (var shift in DoublesAtLeastZero().Where(x => !double.IsInfinity(x)))
{
var result = PlusGammaOp.SumAverageConditional(gammaPower, shift);
Assert.True((gammaPower.IsPointMass && result.IsPointMass) || result.IsProper());
if (gammaPower.Power < 0)
{
double expected = gammaPower.GetMode() + shift;
expected = System.Math.Max(smallestMode, System.Math.Min(largestMode, expected));
double actual = result.GetMode();
Assert.False(double.IsNaN(actual));
double belowActual = GammaPower.FromShapeAndRate(result.Shape, MMath.PreviousDouble(result.Rate), result.Power).GetMode();
double aboveActual = GammaPower.FromShapeAndRate(result.Shape, MMath.NextDouble(result.Rate), result.Power).GetMode();
Assert.True(MMath.AbsDiff(expected, actual, 1) < double.PositiveInfinity ||
MMath.AbsDiff(expected, belowActual, 1e-8) < 1e-8 ||
MMath.AbsDiff(expected, aboveActual, 1e-8) < 1e-8);
}
else
{
double expected = gammaPower.GetMean() + shift;
expected = System.Math.Max(smallestMean, System.Math.Min(largestMean, expected));
double actual = result.GetMean();
Assert.False(double.IsNaN(actual));
double belowActual = GammaPower.FromShapeAndRate(result.Shape, MMath.PreviousDouble(result.Rate), result.Power).GetMean();
double aboveActual = GammaPower.FromShapeAndRate(result.Shape, MMath.NextDouble(result.Rate), result.Power).GetMean();
Assert.True(MMath.AbsDiff(expected, actual, 1) < double.PositiveInfinity ||
MMath.AbsDiff(expected, belowActual, 1e-8) < 1e-8 ||
MMath.AbsDiff(expected, aboveActual, 1e-8) < 1e-8);
}
}
Interlocked.Add(ref count, 1);
if (count % 100000 == 0)
Trace.WriteLine($"{count} cases passed");
});
Trace.WriteLine($"{count} cases passed");
}
[Fact]
public void AverageTest()
{
@ -51,6 +107,26 @@ namespace Microsoft.ML.Probabilistic.Tests
}
}
[Fact]
public void LargestDoubleRatioTest()
{
foreach (var denominator in DoublesGreaterThanZero())
{
if (double.IsPositiveInfinity(denominator)) continue;
foreach (var ratio in Doubles())
{
AssertLargestDoubleRatio(denominator, ratio);
}
}
}
private void AssertLargestDoubleRatio(double denominator, double ratio)
{
double numerator = MMath.LargestDoubleRatio(ratio, denominator);
Assert.True((double)(numerator * denominator) <= ratio);
Assert.True(double.IsPositiveInfinity(numerator) || (double)(MMath.NextDouble(numerator) * denominator) > ratio);
}
/// <summary>
/// Tests an edge case involving subnormal numbers.
/// </summary>
@ -58,12 +134,12 @@ namespace Microsoft.ML.Probabilistic.Tests
public void LargestDoubleProductTest2()
{
// This case needs 50 iterations
MMath.LargestDoubleProduct(1.7976931348623157E+308, 9.8813129168249309E-324);
MMath.LargestDoubleProduct(1.7976931348623157E+308, -4.94065645841247E-324);
MMath.LargestDoubleProduct(1.0000000000000005E-09, 1.0000000000000166E-300);
MMath.LargestDoubleProduct(1.0000000000000005E-09, -1.0000000000000166E-300);
MMath.LargestDoubleProduct(0.00115249439895759, 4.9187693503017E-319);
MMath.LargestDoubleProduct(0.00115249439895759, -4.9187693503017E-319);
MMath.LargestDoubleProduct(9.8813129168249309E-324, 1.7976931348623157E+308);
MMath.LargestDoubleProduct(-4.94065645841247E-324, 1.7976931348623157E+308);
MMath.LargestDoubleProduct(1.0000000000000166E-300, 1.0000000000000005E-09);
MMath.LargestDoubleProduct(-1.0000000000000166E-300, 1.0000000000000005E-09);
MMath.LargestDoubleProduct(4.9187693503017E-319, 0.00115249439895759);
MMath.LargestDoubleProduct(-4.9187693503017E-319, 0.00115249439895759);
}
[Fact]
@ -81,7 +157,7 @@ namespace Microsoft.ML.Probabilistic.Tests
private void AssertLargestDoubleProduct(double denominator, double ratio)
{
double numerator = MMath.LargestDoubleProduct(denominator, ratio);
double numerator = MMath.LargestDoubleProduct(ratio, denominator);
Assert.True((double)(numerator / denominator) <= ratio);
Assert.True(double.IsPositiveInfinity(numerator) || (double)(MMath.NextDouble(numerator) / denominator) > ratio);
}
@ -1280,10 +1356,10 @@ namespace Microsoft.ML.Probabilistic.Tests
[Fact]
public void GammaLower_IsIncreasingInX()
{
Parallel.ForEach (DoublesGreaterThanZero(), a =>
{
IsIncreasingForAtLeastZero(x => MMath.GammaLower(a, x));
});
Parallel.ForEach(DoublesGreaterThanZero(), a =>
{
IsIncreasingForAtLeastZero(x => MMath.GammaLower(a, x));
});
}
[Fact]
@ -2120,6 +2196,36 @@ zL = (L - mx)*sqrt(prec)
}
}
/// <summary>
/// Generates a representative set of proper Gamma distributions.
/// </summary>
/// <returns></returns>
public static IEnumerable<Gamma> Gammas()
{
foreach (var shape in DoublesGreaterThanZero())
{
foreach (var rate in DoublesGreaterThanZero())
{
yield return Gamma.FromShapeAndRate(shape, rate);
}
}
}
/// <summary>
/// Generates a representative set of proper GammaPower distributions.
/// </summary>
/// <returns></returns>
public static IEnumerable<GammaPower> GammaPowers()
{
foreach (var gamma in Gammas())
{
foreach (var power in Doubles().Where(x => !double.IsInfinity(x)))
{
yield return GammaPower.FromGamma(gamma, power);
}
}
}
/// <summary>
/// Generates a representative set of proper Gaussian distributions.
/// </summary>
@ -2162,12 +2268,12 @@ zL = (L - mx)*sqrt(prec)
double precMaxUlpErrorLowerBound = 0;
double precMaxUlpErrorUpperBound = 0;
Bernoulli precMaxUlpErrorIsBetween = new Bernoulli();
foreach(var isBetween in new[] { Bernoulli.PointMass(true), Bernoulli.PointMass(false), new Bernoulli(0.1) })
foreach (var isBetween in new[] { Bernoulli.PointMass(true), Bernoulli.PointMass(false), new Bernoulli(0.1) })
{
Parallel.ForEach (DoublesLessThanZero(), lowerBound =>
Parallel.ForEach(DoublesLessThanZero(), lowerBound =>
{
//Console.WriteLine($"isBetween = {isBetween}, lowerBound = {lowerBound:r}");
foreach (var upperBound in new[] { -lowerBound })// UpperBounds(lowerBound))
foreach (var upperBound in new[] { -lowerBound }.Concat(UpperBounds(lowerBound)).Take(1))
{
//Console.WriteLine($"lowerBound = {lowerBound:r}, upperBound = {upperBound:r}");
double center = MMath.Average(lowerBound, upperBound);
@ -2236,15 +2342,13 @@ zL = (L - mx)*sqrt(prec)
double maxUlpError = 0;
double maxUlpErrorLowerBound = 0;
double maxUlpErrorUpperBound = 0;
IEnumerable<double> lowerBounds = Doubles();
// maxUlpError = 22906784576, lowerBound = -0.010000000000000002, upperBound = -0.01
lowerBounds = new double[] { 0 };
Parallel.ForEach(lowerBounds, lowerBound =>
bool trace = false;
Parallel.ForEach(new double[] { 0 }.Concat(Doubles()).Take(1), lowerBound =>
{
foreach (double upperBound in new double[] { 1 })
//Parallel.ForEach(UpperBounds(lowerBound), upperBound =>
foreach (double upperBound in new double[] { 1 }.Concat(UpperBounds(lowerBound)).Take(1))
{
Trace.WriteLine($"lowerBound = {lowerBound:r}, upperBound = {upperBound:r}");
if (trace) Trace.WriteLine($"lowerBound = {lowerBound:r}, upperBound = {upperBound:r}");
foreach (var x in Gaussians())
{
if (x.IsPointMass) continue;
@ -2279,8 +2383,8 @@ zL = (L - mx)*sqrt(prec)
}
}
}
Trace.WriteLine($"maxUlpError = {maxUlpError}, lowerBound = {maxUlpErrorLowerBound:r}, upperBound = {maxUlpErrorUpperBound:r}");
}//);
if (trace) Trace.WriteLine($"maxUlpError = {maxUlpError}, lowerBound = {maxUlpErrorLowerBound:r}, upperBound = {maxUlpErrorUpperBound:r}");
}
});
Assert.True(maxUlpError < 1e3);
}
@ -2295,16 +2399,13 @@ zL = (L - mx)*sqrt(prec)
double precMaxUlpErrorLowerBound = 0;
double precMaxUlpErrorUpperBound = 0;
Bernoulli isBetween = new Bernoulli(1.0);
foreach (double lowerBound in new[] { -10000.0 })// Doubles())
//foreach (double lowerBound in Doubles())
bool trace = false;
foreach (double lowerBound in new[] { -10000.0 }.Concat(Doubles()).Take(1))
{
foreach (double upperBound in new[] { -9999.9999999999982 })
//foreach (double upperBound in UpperBounds(lowerBound))
//Parallel.ForEach(UpperBounds(lowerBound), upperBound =>
foreach (double upperBound in new[] { -9999.9999999999982 }.Concat(UpperBounds(lowerBound)).Take(1))
{
Trace.WriteLine($"lowerBound = {lowerBound:r}, upperBound = {upperBound:r}");
//foreach (var x in new[] { Gaussian.FromNatural(-0.1, 0.010000000000000002) })// Gaussians())
Parallel.ForEach (Gaussians().Where(g => !g.IsPointMass), x =>
if (trace) Trace.WriteLine($"lowerBound = {lowerBound:r}, upperBound = {upperBound:r}");
Parallel.ForEach(Gaussians().Where(g => !g.IsPointMass), x =>
{
double mx = x.GetMean();
Gaussian toX = DoubleIsBetweenOp.XAverageConditional(isBetween, x, lowerBound, upperBound);
@ -2384,9 +2485,12 @@ zL = (L - mx)*sqrt(prec)
}
}
});
}//);
Trace.WriteLine($"meanMaxUlpError = {meanMaxUlpError}, lowerBound = {meanMaxUlpErrorLowerBound:r}, upperBound = {meanMaxUlpErrorUpperBound:r}");
Trace.WriteLine($"precMaxUlpError = {precMaxUlpError}, lowerBound = {precMaxUlpErrorLowerBound:r}, upperBound = {precMaxUlpErrorUpperBound:r}");
}
if (trace)
{
Trace.WriteLine($"meanMaxUlpError = {meanMaxUlpError}, lowerBound = {meanMaxUlpErrorLowerBound:r}, upperBound = {meanMaxUlpErrorUpperBound:r}");
Trace.WriteLine($"precMaxUlpError = {precMaxUlpError}, lowerBound = {precMaxUlpErrorLowerBound:r}, upperBound = {precMaxUlpErrorUpperBound:r}");
}
}
// meanMaxUlpError = 4271.53318407361, lowerBound = -1.0000000000000006E-12, upperBound = inf
// precMaxUlpError = 5008, lowerBound = 1E+40, upperBound = 1.00000001E+40
@ -2404,19 +2508,15 @@ zL = (L - mx)*sqrt(prec)
double precMaxUlpErrorLowerBound = 0;
double precMaxUlpErrorUpperBound = 0;
Bernoulli isBetween = new Bernoulli(1.0);
foreach (double lowerBound in new[] { -1000.0 })// Doubles())
//foreach (double lowerBound in Doubles())
bool trace = false;
foreach (double lowerBound in new[] { -1000.0 }.Concat(Doubles()).Take(1))
{
foreach (double upperBound in new[] { 0.0 })
//foreach (double upperBound in UpperBounds(lowerBound))
//Parallel.ForEach(UpperBounds(lowerBound), upperBound =>
foreach (double upperBound in new[] { 0.0 }.Concat(UpperBounds(lowerBound)).Take(1))
{
Console.WriteLine($"lowerBound = {lowerBound:r}, upperBound = {upperBound:r}");
if (trace) Console.WriteLine($"lowerBound = {lowerBound:r}, upperBound = {upperBound:r}");
double center = (lowerBound + upperBound) / 2;
if (double.IsNegativeInfinity(lowerBound) && double.IsPositiveInfinity(upperBound))
center = 0;
//foreach (var x in new[] { Gaussian.FromNatural(0, 1e55) })// Gaussians())
//foreach (var x in Gaussians())
Parallel.ForEach(Gaussians(), x =>
{
double mx = x.GetMean();
@ -2483,9 +2583,12 @@ zL = (L - mx)*sqrt(prec)
}
}
});
}//);
Console.WriteLine($"meanMaxUlpError = {meanMaxUlpError}, lowerBound = {meanMaxUlpErrorLowerBound:r}, upperBound = {meanMaxUlpErrorUpperBound:r}");
Console.WriteLine($"precMaxUlpError = {precMaxUlpError}, lowerBound = {precMaxUlpErrorLowerBound:r}, upperBound = {precMaxUlpErrorUpperBound:r}");
}
if (trace)
{
Console.WriteLine($"meanMaxUlpError = {meanMaxUlpError}, lowerBound = {meanMaxUlpErrorLowerBound:r}, upperBound = {meanMaxUlpErrorUpperBound:r}");
Console.WriteLine($"precMaxUlpError = {precMaxUlpError}, lowerBound = {precMaxUlpErrorLowerBound:r}, upperBound = {precMaxUlpErrorUpperBound:r}");
}
}
// meanMaxUlpError = 104.001435643838, lowerBound = -1.0000000000000022E-37, upperBound = 9.9000000000000191E-36
// precMaxUlpError = 4960, lowerBound = -1.0000000000000026E-47, upperBound = -9.9999999000000263E-48
@ -2504,14 +2607,13 @@ zL = (L - mx)*sqrt(prec)
double precMaxUlpError = 0;
double precMaxUlpErrorLowerBound = 0;
double precMaxUlpErrorUpperBound = 0;
foreach (double lowerBound in new[] { 0 })
//foreach (double lowerBound in Doubles())
bool trace = false;
foreach (double lowerBound in new[] { 0.0 }.Concat(Doubles()).Take(1))
{
foreach (double upperBound in new[] { 1 })// DoublesGreaterThanZero())
//Parallel.ForEach(UpperBounds(lowerBound), upperBound =>
foreach (double upperBound in new[] { 1.0 }.Concat(UpperBounds(lowerBound)).Take(1))
{
Console.WriteLine($"lowerBound = {lowerBound:r}, upperBound = {upperBound:r}");
Parallel.ForEach (Gaussians(), x =>
if (trace) Console.WriteLine($"lowerBound = {lowerBound:r}, upperBound = {upperBound:r}");
Parallel.ForEach(Gaussians(), x =>
{
Gaussian toX = DoubleIsBetweenOp.XAverageConditional(true, x, lowerBound, upperBound);
Gaussian xPost;
@ -2573,9 +2675,12 @@ zL = (L - mx)*sqrt(prec)
}
}
});
}//);
Console.WriteLine($"meanMaxUlpError = {meanMaxUlpError}, lowerBound = {meanMaxUlpErrorLowerBound:r}, upperBound = {meanMaxUlpErrorUpperBound:r}");
Console.WriteLine($"precMaxUlpError = {precMaxUlpError}, lowerBound = {precMaxUlpErrorLowerBound:r}, upperBound = {precMaxUlpErrorUpperBound:r}");
}
if (trace)
{
Console.WriteLine($"meanMaxUlpError = {meanMaxUlpError}, lowerBound = {meanMaxUlpErrorLowerBound:r}, upperBound = {meanMaxUlpErrorUpperBound:r}");
Console.WriteLine($"precMaxUlpError = {precMaxUlpError}, lowerBound = {precMaxUlpErrorLowerBound:r}, upperBound = {precMaxUlpErrorUpperBound:r}");
}
}
// meanMaxUlpError = 33584, lowerBound = -1E+30, upperBound = 9.9E+31
// precMaxUlpError = 256, lowerBound = -1, upperBound = 0

Просмотреть файл

@ -25,6 +25,7 @@ namespace Microsoft.ML.Probabilistic.Tests
using Microsoft.ML.Probabilistic.Math;
using Xunit.Sdk;
using Microsoft.ML.Probabilistic.Models;
using Microsoft.ML.Probabilistic.Factors;
public static class TestUtils
{
@ -814,6 +815,14 @@ namespace Microsoft.ML.Probabilistic.Tests
}
}
/// <summary>
/// A test that uses this property must have Trait("Category", "ModifiesGlobals")
/// </summary>
public static IDisposable TemporarilyAllowGammaImproperProducts
{
get { return new TemporaryHelper(() => GammaProductOp_Laplace.ForceProper = false, () => GammaProductOp_Laplace.ForceProper = true); }
}
public static IDisposable TemporarilyAllowBetaImproperSums
{
get { return new TemporaryHelper(() => Beta.AllowImproperSum = true, () => Beta.AllowImproperSum = false); }