GSL::MultiMin::FdfMinimizer.alloc(type, n)
GSL::MultiMin::FMinimizer.alloc(type, n)
These methods create a minimizer of the given type for an n-dimensional function. The type is specified by one of the constants below, or by its string equivalent.
GSL::MultiMin::FdfMinimizer::CONJUGATE_FR
or "conjugate_fr"
GSL::MultiMin::FdfMinimizer::CONJUGATE_PR
or "conjugate_pr"
GSL::MultiMin::FdfMinimizer::VECTOR_BFGS
or "vector_bfgs"
GSL::MultiMin::FdfMinimizer::VECTOR_BFGS2
or "vector_bfgs2" (GSL-1.9 or later)
GSL::MultiMin::FdfMinimizer::STEEPEST_DESCENT
or "steepest_descent"
GSL::MultiMin::FMinimizer::NMSIMPLEX
or "nmsimplex"
ex:
    include GSL::MultiMin
    m1 = FdfMinimizer.alloc(FdfMinimizer::CONJUGATE_FR, 2)
    m2 = FdfMinimizer.alloc("steepest_descent", 4)
    m3 = FMinimizer.alloc(FMinimizer::NMSIMPLEX, 3)
    m4 = FMinimizer.alloc("nmsimplex", 2)
GSL::MultiMin::FdfMinimizer#set(func, x, step_size, tol)
This method initializes the minimizer self to minimize the function func (the GSL::MultiMin::Function_fdf class, see below) starting from the initial point x (GSL::Vector). The size of the first trial step is given by the scalar step_size. The accuracy of the line minimization is specified by tol.
GSL::MultiMin::FMinimizer#set(func, x, step_size)
This method initializes the minimizer self to minimize the function func (the GSL::MultiMin::Function class, see below) starting from the initial point x (GSL::Vector). The sizes of the initial trial steps are given by the vector step_size.
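As a minimal sketch of the two set signatures (the paraboloid f, its gradient df, and the names x0, ss, fdf, fmin below are made-up test inputs, not part of the library):

    require("gsl")
    include GSL::MultiMin

    # Hypothetical test function f(x, y) = x^2 + y^2 and its gradient
    f  = Proc.new { |v, params| v[0]*v[0] + v[1]*v[1] }
    df = Proc.new { |v, params, grad| grad[0] = 2.0*v[0]; grad[1] = 2.0*v[1] }

    x0 = GSL::Vector.alloc(1.0, 1.0)            # starting point

    # FdfMinimizer#set: scalar first step size plus line-minimization tolerance
    fdf = FdfMinimizer.alloc("conjugate_fr", 2)
    fdf.set(Function_fdf.alloc(f, df, 2), x0, 0.01, 1e-4)

    # FMinimizer#set: a Vector of initial step sizes, one per variable, no tol
    ss = GSL::Vector.alloc(2)
    ss.set_all(0.5)
    fmin = FMinimizer.alloc("nmsimplex", 2)
    fmin.set(Function.alloc(f, 2), x0, ss)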
GSL::MultiMin::FdfMinimizer#name
GSL::MultiMin::FMinimizer#name
These methods return the name of the minimizer self.
You must provide a parametric function of n variables for the minimizers to operate on. You may also need to provide a routine which calculates the gradient of the function. In order to allow for general parameters the functions are defined by the classes GSL::MultiMin::Function_fdf and GSL::MultiMin::Function.
GSL::MultiMin::Function_fdf.alloc(proc_f, proc_df, n)
GSL::MultiMin::Function_fdf.alloc(proc_f, proc_df, proc_fdf, n)
GSL::MultiMin::Function_fdf#set_procs(proc_f, proc_df)
GSL::MultiMin::Function_fdf#set_procs(proc_f, proc_df, n)
GSL::MultiMin::Function_fdf#set_procs(proc_f, proc_df, proc_fdf, n)
GSL::MultiMin::Function_fdf#set_params(params)
See example below.
    include GSL::MultiMin

    my_f = Proc.new { |v, params|
      x = v[0]; y = v[1]
      p0 = params[0]; p1 = params[1]
      10.0*(x - p0)*(x - p0) + 20.0*(y - p1)*(y - p1) + 30.0
    }

    my_df = Proc.new { |v, params, df|
      x = v[0]; y = v[1]
      p0 = params[0]; p1 = params[1]
      df[0] = 20.0*(x - p0)
      df[1] = 40.0*(y - p1)
    }

    my_func = Function_fdf.alloc(my_f, my_df, 2)
    my_func.set_params([1.0, 2.0])      # parameters
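Here my_df supplies the analytic gradient of my_f: differentiating f(x, y) = 10 (x - p0)^2 + 20 (y - p1)^2 + 30 gives df/dx = 20 (x - p0) and df/dy = 40 (y - p1), which are stored into df[0] and df[1].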
GSL::MultiMin::Function.alloc(proc_f, n)
GSL::MultiMin::Function#set_proc(proc_f)
GSL::MultiMin::Function#set_proc(proc_f, n)
GSL::MultiMin::Function#set_params(params)
See example below.
    include GSL::MultiMin

    np = 2
    my_f = Proc.new { |v, params|
      x = v[0]; y = v[1]
      p0 = params[0]; p1 = params[1]
      10.0*(x - p0)*(x - p0) + 20.0*(y - p1)*(y - p1) + 30.0
    }

    my_func = Function.alloc(my_f, np)
    my_func.set_params([1.0, 2.0])      # parameters
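Note that GSL::MultiMin::Function carries no gradient procedure: it is meant for the derivative-free FMinimizer algorithms such as nmsimplex, which is why only proc_f is supplied.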
GSL::MultiMin::FdfMinimizer#iterate
GSL::MultiMin::FMinimizer#iterate
These methods perform a single iteration of the minimizer self. If the iteration encounters an unexpected problem an error code is returned.
GSL::MultiMin::FdfMinimizer#x
GSL::MultiMin::FdfMinimizer#minimum
GSL::MultiMin::FdfMinimizer#gradient
GSL::MultiMin::FMinimizer#x
GSL::MultiMin::FMinimizer#minimum
GSL::MultiMin::FMinimizer#size
These methods return the current best estimate of the location of the minimum, the value of the function at that point, its gradient (FdfMinimizer only), and the minimizer-specific characteristic size (FMinimizer only).
GSL::MultiMin::FdfMinimizer#restart
This method resets the minimizer self to use the current point as a new starting point.
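As a sketch, a minimal iteration loop built from these methods might look like the following (it assumes the my_func object and parameters from the Function_fdf example above; the convergence test is described below):

    minimizer = FdfMinimizer.alloc("vector_bfgs", 2)
    minimizer.set(my_func, GSL::Vector.alloc(5.0, 7.0), 0.01, 1e-4)

    100.times do
      minimizer.iterate
      break if minimizer.test_gradient(1e-3) == GSL::SUCCESS
    end
    p minimizer.x         # best estimate of the minimum's location
    p minimizer.minimum   # function value at that point
    p minimizer.gradient  # gradient at that point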
A minimization procedure should stop when one of the following conditions is true:
* A minimum has been found to within the user-specified precision.
* A user-specified maximum number of iterations has been reached.
* An error has occurred.
The handling of these conditions is under user control. The methods below allow the user to test the precision of the current result.
GSL::MultiMin::FdfMinimizer#test_gradient(epsabs)
GSL::MultiMin::FdfMinimizer.test_gradient(g, epsabs)
These methods test the norm of the gradient g against the absolute tolerance epsabs. The gradient of a multidimensional function goes to zero at a minimum. The tests return GSL::SUCCESS if the following condition is achieved,
|g| < epsabs
and return GSL::CONTINUE otherwise. A suitable choice of epsabs can be made from the desired accuracy in the function for small variations in x. The relationship between these quantities is given by \delta f = g \delta x.
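For example, if the function value needs to be accurate to about \delta f = 1e-4 while x varies on a scale of \delta x = 0.1, the relation above suggests testing the gradient against epsabs = \delta f / \delta x = 1e-3, the value used in the example below.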
GSL::MultiMin::FMinimizer#test_size(epsabs)
GSL::MultiMin::FMinimizer.test_size(size, epsabs)
These methods test the minimizer-specific characteristic size against the absolute tolerance epsabs. GSL::SUCCESS is returned if the size is smaller than the tolerance, otherwise GSL::CONTINUE is returned.
ex1:
    #!/usr/bin/env ruby
    require("gsl")
    include GSL::MultiMin

    my_f = Proc.new { |v, params|
      x = v[0]; y = v[1]
      p0 = params[0]; p1 = params[1]
      10.0*(x - p0)*(x - p0) + 20.0*(y - p1)*(y - p1) + 30.0
    }

    my_df = Proc.new { |v, params, df|
      x = v[0]; y = v[1]
      p0 = params[0]; p1 = params[1]
      df[0] = 20.0*(x - p0)
      df[1] = 40.0*(y - p1)
    }

    my_func = Function_fdf.alloc(my_f, my_df, 2)
    my_func.set_params([1.0, 2.0])      # parameters

    x = Vector.alloc(5.0, 7.0)          # starting point
    minimizer = FdfMinimizer.alloc("conjugate_fr", 2)
    minimizer.set(my_func, x, 0.01, 1e-4)

    iter = 0
    begin
      iter += 1
      status = minimizer.iterate()
      status = minimizer.test_gradient(1e-3)
      if status == GSL::SUCCESS
        puts("Minimum found at")
      end
      x = minimizer.x
      f = minimizer.f
      printf("%5d %.5f %.5f %10.5f\n", iter, x[0], x[1], f)
    end while status == GSL::CONTINUE and iter < 100
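With the parameters set to [1.0, 2.0], the function f(x, y) = 10 (x - 1)^2 + 20 (y - 2)^2 + 30 attains its minimum value 30 at (1, 2), so the loop should converge to that point.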
ex2:
    #!/usr/bin/env ruby
    require("gsl")
    include GSL::MultiMin

    np = 2
    my_f = Proc.new { |v, params|
      x = v[0]; y = v[1]
      p0 = params[0]; p1 = params[1]
      10.0*(x - p0)*(x - p0) + 20.0*(y - p1)*(y - p1) + 30.0
    }

    my_func = Function.alloc(my_f, np)
    my_func.set_params([1.0, 2.0])      # parameters

    x = Vector.alloc([5, 7])            # starting point
    ss = Vector.alloc(np)
    ss.set_all(1.0)                     # initial step sizes

    minimizer = FMinimizer.alloc("nmsimplex", np)
    minimizer.set(my_func, x, ss)

    iter = 0
    begin
      iter += 1
      status = minimizer.iterate()
      status = minimizer.test_size(1e-2)
      if status == GSL::SUCCESS
        puts("converged to minimum at")
      end
      x = minimizer.x
      printf("%5d ", iter)
      for i in 0...np do
        printf("%10.3e ", x[i])
      end
      printf("f() = %7.3f size = %.3f\n", minimizer.fval, minimizer.size)
    end while status == GSL::CONTINUE and iter < 100
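Here convergence is tested on the simplex size rather than the gradient: the Nelder-Mead simplex contracts around the minimum of the same function, so the iteration stops once its characteristic size falls below 1e-2, again near (1, 2).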