F# Multi Variable Curve Fitting Example

← All NMath Code Examples


namespace CenterSpace.NMath.Examples.FSharp

open System

open CenterSpace.NMath.Core

module MultiVariableCurveFittingExample =

    /// <summary>
    /// The MultiVariableFunctionFitter&lt;T&gt; needs a parameterized function
    /// and a set of data points. One way to specify the parameterized function,
    /// and optionally its gradient with respect to the parameters, is to
    /// implement an instance of the abstract class DoubleParameterizedFunctional.
    /// You must override the Evaluate() method, which computes and returns the
    /// parameterized function value at a specified set of parameters and
    /// point. Overriding the GradientWithRespectToParams() method is optional;
    /// if you do not override it, a numerical approximation using finite
    /// differences will be used to approximate the gradient if it is needed.
    /// Here the parameterized function we are defining is a real valued function
    /// of two variables, x0 and x1, and three parameters, p0, p1, and p2, defined
    /// by the formula:
    /// p0*x1*x0^2 + p1*sin(x0) + p2*x1^3
    /// We must initialize the base class with the dimension of our function's
    /// domain. Since our function is a function of two variables we initialize
    /// the base class with 2.
    /// </summary>
    type ParameterizedFunction() =
      inherit DoubleParameterizedFunctional(2)

      /// <summary>
      /// Override the abstract Evaluate() function.
      /// </summary>
      /// <param name="parameters">The parameter values.</param>
      /// <param name="x">The point to evaluate at.</param>
      /// <returns>The value of the parameterized function at the given
      /// point and parameters.</returns>
      override u.Evaluate( parameters : DoubleVector, x : DoubleVector) : double = 
        parameters.[0] * x.[1] * Math.Pow( x.[0], 2.0 ) + parameters.[1] * Math.Sin( x.[0] ) + parameters.[2] * Math.Pow( x.[1], 3.0 )

      /// <summary>
      /// Since the gradient of our function is rather easy to derive, we
      /// override the GradientWithRespectToParams() function. Remember, this is
      /// the vector of partials with respect to the parameters, NOT the variables.
      /// </summary>
      /// <param name="parameters">Evaluate the gradient at these parameter values.</param>
      /// <param name="x">Evaluate the gradient at this point.</param>
      /// <param name="grad">Place the value of the gradient in this vector.</param>
      /// <remarks>Note how this function does not return the gradient as a new
      /// vector, but places the gradient value in a vector supplied by the 
      /// calling routine. This is for optimization purposes. The curve fitter uses 
      /// an optimization algorithm that will most likely be iterative, and thus may 
      /// need to evaluate the gradient many times. Having the vector 
      /// passed in to the routine allows the calling code to allocate space for the 
      /// gradient once and reuse it on successive calls, thus avoiding the potential 
      /// of allocating a large number of small objects on the managed heap.</remarks>
      override u.GradientWithRespectToParams( parameters : DoubleVector, x : DoubleVector, grad : DoubleVector ) = 
        // BUG FIX: the original declared this with 'member' (which *hides* the
        // base virtual rather than overriding it, so the fitter would never call
        // it) and used '=' (structural comparison in F#) instead of the
        // assignment operator '<-', so the gradient vector was never written —
        // the body merely built and discarded a bool*bool*bool tuple.
        grad.[0] <- x.[0] * x.[0] * x.[1]    // d/dp0 of p0*x1*x0^2
        grad.[1] <- Math.Sin( x.[0] )        // d/dp1 of p1*sin(x0)
        grad.[2] <- Math.Pow( x.[1], 3.0 )   // d/dp2 of p2*x1^3
    /// <summary>
    /// A .NET example in F# showing how to fit a generalized multivariable function to a set 
    /// of points.
    /// </summary>
    /// <remarks>
    /// Uses the trust-region algorithm.
    /// </remarks>

    // Class MultiVariableFunctionFitter fits a parameterized multivariable function to a
    // set of points. In the space of the function parameters, beginning at a specified
    // starting point, the Fit() method finds a minimum (possibly local) in the sum of
    // the squared residuals with respect to the data. Fit() uses a nonlinear least
    // squares minimizer specified as a generic.

    // For example, here is dataset from the Matlab docs, which fits a function
    // z = f(x, y) to three-dimensional data describing a surface  
    // http://www.mathworks.com/support/solutions/data/1-17YMU.html?solution=1-17YMU

    // Since the domain of the function has two dimensions, we use a two-column matrix to
    // hold the x,y data.
    let mutable xyValues = new DoubleMatrix(10, 2)

    xyValues.[Slice.All, 0] <- new DoubleVector("3.6 7.7 9.3 4.1 8.6 2.8 1.3 7.9 10.0 5.4")
    xyValues.[Slice.All, 1] <- new DoubleVector("16.5 150.6 263.1 24.7 208.5 9.9 2.7 163.9 325.0 54.3")

    // The observed z values at each x,y point.
    let mutable zValues = new DoubleVector("95.09 23.11 60.63 48.59 89.12 76.97 45.68 1.84 82.17 44.47")

    // Published starting guess in the space of the function parameters.
    let start = new DoubleVector("10 10 10")

    // Construct a curve fitting object for our function, then perform the fit. We use the
    // TrustRegionMinimizer implementation of the non-linear least squares minimizer to
    // find the optimal set of parameters.
    let fitter = new MultiVariableFunctionFitter<TrustRegionMinimizer>( new ParameterizedFunction() )
    let mutable solution = fitter.Fit(xyValues, zValues, start)

    // Display the results
    printfn "Fit #1"
    printfn "Matlab solution: 0.0074 -19.9749 -0.0000"
    printfn "NMath solution: %s" (solution.ToString())
    printfn "NMath residual: %A" fitter.Minimizer.FinalResidual
    printfn ""

    // The parameterized function used by the fitter may also be specified using a delegate.
    // Here we define a delegate for the same function
    // p0*x1*x0^2 + p1*sin(x0) + p2*x1^3
    let xDimension = 2 // The dimension of the domain of f.
    let myFunc = new Func<DoubleVector,DoubleVector,double>(fun p x -> 
      p.[0] * x.[1] * Math.Pow( x.[0], 2.0 ) + p.[1] * Math.Sin( x.[0] ) + p.[2] * Math.Pow( x.[1], 3.0 ))

    // The delegate for the parameterized function may be used directly in MultiVariableFunctionFitter
    // constructors, or may be wrapped by the DoubleVectorParameterizedDelegate, which implements
    // DoubleParameterizedFunctional. Here we do the latter.
    // Note that we do not supply the gradient with respect to parameters here; the
    // gradient will be computed using a finite difference algorithm if needed.
    fitter.Function <- new DoubleVectorParameterizedDelegate( myFunc, xDimension )

    // Perform the fit and display the results
    solution <- fitter.Fit(xyValues, zValues, start)
    printfn "Fit #1 (Repeated without user specified Partial Derivatives)"
    printfn "NMath solution: %s" (solution.ToString())
    printfn "NMath residual: %A" fitter.Minimizer.FinalResidual
    printfn ""

    // Now let's perform the fit again using some random data. First we generate
    // 50 random x,y points in the range (0,10).
    xyValues <- new DoubleMatrix(50, 2, new RandGenUniform(0.0, 10.0))

    // The target solution.
    let target = new DoubleVector("1 2 3")

    // When calculating the z values, we add some noise so the points
    // don't lie exactly on the target surface.
    zValues <- new DoubleVector(50, new RandGenUniform(-1.0, 1.0))
    for i = 0 to zValues.Length - 1 do
      // BUG FIX: add the function value to the pre-generated noise rather than
      // overwriting it — the original assignment discarded the noise entirely,
      // contradicting the comment above and placing every point exactly on the
      // target surface.
      zValues.[i] <- zValues.[i] + myFunc.Invoke(target, xyValues.Row(i))

    // Perform the fit and display the results
    solution <- fitter.Fit(xyValues, zValues, start)
    printfn "Fit #2"
    printfn "Target solution: %s" (target.ToString())
    printfn "Actual solution: %s" (solution.ToString())
    printfn "Residual: %A" fitter.Minimizer.FinalResidual
    printfn ""
    printfn "Press Enter Key"
    Console.Read() |> ignore 

← All NMath Code Examples