opfunu.name_based package

opfunu.name_based.a_func module

class opfunu.name_based.a_func.AMGM(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

[1] The AMGM (Arithmetic Mean - Geometric Mean Equality). [2] Gavana, A. Global Optimization Benchmarks and AMPGO, retrieved 2015

continuous = True
convex = True
differentiable = False
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{AMGM}}(x) = \\left ( \\frac{1}{n} \\sum_{i=1}^{n} x_i - \\sqrt[n]{ \\prod_{i=1}^{n} x_i} \\right )^2'
latex_formula_bounds = 'x_i \\in [0., 10.0], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd \\in \\mathbb{N}_{+}^{*}'
latex_formula_global_optimum = 'f(x^*) \\approx 0, \\text{ at } x_1 = x_2 = ... = x_n'
linear = False
modality = False
name = 'AMGM Function'
parametric = False
randomized_term = False
scalable = True
separable = False
unimodal = False
class opfunu.name_based.a_func.Ackley01(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

[1] Adorio, E. MVF - “Multivariate Test Functions Library in C for Unconstrained Global Optimization”, 2005

TODO: the -0.2 factor in the exponent of the first term is given as -0.02 in Jamil et al.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{Ackley01}}(x) = -20 e^{-0.2 \\sqrt{\\frac{1}{n} \\sum_{i=1}^n x_i^2}} - e^{\\frac{1}{n} \\sum_{i=1}^n \\cos(2 \\pi x_i)} + 20 + e'
latex_formula_bounds = 'x_i \\in [-35, 35], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd \\in \\mathbb{N}_{+}^{*}'
latex_formula_global_optimum = 'f(0, ..., 0) = 0'
linear = False
modality = False
name = 'Ackley 01'
parametric = False
randomized_term = False
scalable = True
separable = False
unimodal = False
class opfunu.name_based.a_func.Ackley02(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

[1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{Ackley02}}(x) = -200 e^{-0.02 \\sqrt{x_1^2 + x_2^2}}'
latex_formula_bounds = 'x_i \\in [-32.0, 32.0], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(0, ..., 0) = -200'
linear = False
modality = False
name = 'Ackley 02'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = True
class opfunu.name_based.a_func.Ackley03(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

[1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{Ackley03}}(x) = -200 e^{-0.02 \\sqrt{x_1^2 + x_2^2}} + 5e^{\\cos(3x_1) + \\sin(3x_2)}'
latex_formula_bounds = 'x_i \\in [-32.0, 32.0], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(x_1, x_2) \\approx -195.629028238419, \\text{ at } x_1 = -0.682584587365898, x_2 = -0.36075325513719'
linear = False
modality = False
name = 'Ackley 03'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = True
class opfunu.name_based.a_func.Adjiman(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

[1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{Adjiman}}(x) = \\cos(x_1)\\sin(x_2) - \\frac{x_1}{(x_2^2 + 1)}'
latex_formula_bounds = 'x_1 \\in [-1.0, 2.0], x_2 \\in [-1., 1.]'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(x_1, x_2) \\approx -2.02181, \\text{ at } x_1 = 2.0, x_2 = 0.10578'
linear = False
modality = False
name = 'Adjiman Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.a_func.Alpine01(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

[1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = False
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{Alpine01}}(x) = \\sum_{i=1}^{n} \\lvert {x_i \\sin \\left( x_i\\right) + 0.1 x_i} \\rvert'
latex_formula_bounds = 'x_i \\in [-10.0, 10.0], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd \\in \\mathbb{N}_{+}^{*}'
latex_formula_global_optimum = 'f(x^*) \\approx 0, \\text{ at } x^* = 0.0'
linear = False
modality = False
name = 'Alpine01 Function'
parametric = False
randomized_term = False
scalable = False
separable = True
unimodal = False
class opfunu.name_based.a_func.Alpine02(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

[1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{Alpine02}}(x) = \\prod_{i=1}^{n} \\sqrt{x_i} \\sin(x_i)'
latex_formula_bounds = 'x_i \\in [0., 10.0], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd \\in \\mathbb{N}_{+}^{*}'
latex_formula_global_optimum = 'f(x^*) \\approx -6.12950, \\text{ at } x_1 = 7.91705268, x_2 = 4.81584232'
linear = False
modality = False
name = 'Alpine02 Function'
parametric = False
randomized_term = False
scalable = True
separable = True
unimodal = False

opfunu.name_based.b_func module

class opfunu.name_based.b_func.BartelsConn(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

[1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = False
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{BartelsConn}}(x) = \\lvert {x_1^2 + x_2^2 + x_1x_2} \\rvert + \\lvert {\\sin(x_1)} \\rvert + \\lvert {\\cos(x_2)} \\rvert'
latex_formula_bounds = 'x_i \\in [-500, 500], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(0, 0) = 1.0'
linear = False
modality = False
name = 'Bartels Conn Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.b_func.Beale(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = False
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{Beale}}(x) = \\left(x_1 x_2 - x_1 + 1.5\\right)^{2} +\\left(x_1 x_2^{2} - x_1 + 2.25\\right)^{2} + \\left(x_1 x_2^{3} - x_1 + 2.625\\right)^{2}'
latex_formula_bounds = 'x_i \\in [-4.5, 4.5], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(3.0, 0.5) = 0.0'
linear = False
modality = False
name = 'Beale Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.b_func.BiggsExp02(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = '\\begin{matrix}f_{\\text{BiggsExp02}}(x) = \\sum_{i=1}^{10} (e^{-t_i x_1} - 5 e^{-t_i x_2} - y_i)^2 \\\\t_i = 0.1 i\\\\y_i = e^{-t_i} - 5 e^{-10t_i}\\\\ \\end{matrix}'
latex_formula_bounds = 'x_i \\in [0., 20.], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(1.0, 10.) = 0.0'
linear = False
modality = False
name = 'Biggs EXP2 Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.b_func.BiggsExp03(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = '\\begin{matrix}\\ f_{\\text{BiggsExp03}}(x) = \\sum_{i=1}^{10} (e^{-t_i x_1} - x_3e^{-t_i x_2} - y_i)^2\\\\t_i = 0.1i\\\\ y_i = e^{-t_i} - 5e^{-10 t_i}\\\\ \\end{matrix}'
latex_formula_bounds = 'x_i \\in [0., 20.], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 3'
latex_formula_global_optimum = 'f(1.0, 10., 5.0) = 0.0'
linear = False
modality = False
name = 'Biggs EXP3 Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.b_func.BiggsExp04(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = '\\begin{matrix}\\ f_{\\text{BiggsExp04}}(x) = \\sum_{i=1}^{10} (x_3 e^{-t_i x_1} - x_4 e^{-t_i x_2} - y_i)^2\\\\t_i = 0.1i\\\\ y_i = e^{-t_i} - 5 e^{-10 t_i}\\\\ \\end{matrix}'
latex_formula_bounds = 'x_i \\in [0., 20.], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 4'
latex_formula_global_optimum = 'f(1.0, 10., 1.0, 5.0) = 0.0'
linear = False
modality = False
name = 'Biggs EXP4 Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.b_func.BiggsExp05(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = '\\begin{matrix}\\ f_{\\text{BiggsExp05}}(x) = \\sum_{i=1}^{11} (x_3 e^{-t_i x_1} - x_4 e^{-t_i x_2} + 3 e^{-t_i x_5} - y_i)^2\\\\t_i = 0.1i\\\\ y_i = e^{-t_i} - 5e^{-10 t_i} + 3e^{-4 t_i}\\\\ \\end{matrix}'
latex_formula_bounds = 'x_i \\in [0., 20.], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 5'
latex_formula_global_optimum = 'f(1.0, 10., 1.0, 5.0, 4.0) = 0.0'
linear = False
modality = False
name = 'Biggs EXP5 Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.b_func.Bird(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{Bird}}(x) = \\left(x_1 - x_2\\right)^{2} + e^{\\left[1 -\\sin\\left(x_1\\right) \\right]^{2}} \\cos\\left(x_2\\right) + e^{\\left[1 - \\cos\\left(x_2\\right)\\right]^{2}} \\sin\\left(x_1\\right)'
latex_formula_bounds = 'x_i \\in [-2\\pi, 2\\pi], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(4.701055751981055, 3.152946019601391) = f(-1.582142172055011, -3.130246799635430) = -106.7645367198034'
linear = False
modality = False
name = 'Bird Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.b_func.Bohachevsky1(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{Bohachevsky}}(x) = \\sum_{i=1}^{n-1}\\left[x_i^2 + 2 x_{i+1}^2 - 0.3 \\cos(3 \\pi x_i) - 0.4 \\cos(4 \\pi x_{i + 1}) + 0.7 \\right]'
latex_formula_bounds = 'x_i \\in [-100, 100], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(0, 0) = 0'
linear = False
modality = False
name = 'Bohachevsky 1 Function'
parametric = False
randomized_term = False
scalable = False
separable = True
unimodal = False
class opfunu.name_based.b_func.Bohachevsky2(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{Bohachevsky}}(x) = \\sum_{i=1}^{n-1}\\left[x_i^2 + 2 x_{i+1}^2 - 0.3 \\cos(3 \\pi x_i) - 0.4 \\cos(4 \\pi x_{i + 1}) + 0.7 \\right]'
latex_formula_bounds = 'x_i \\in [-100, 100], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(0, 0) = 0'
linear = False
modality = False
name = 'Bohachevsky 2 Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.b_func.Bohachevsky3(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{Bohachevsky}}(x) = \\sum_{i=1}^{n-1}\\left[x_i^2 + 2 x_{i+1}^2 - 0.3 \\cos(3 \\pi x_i) - 0.4 \\cos(4 \\pi x_{i + 1}) + 0.7 \\right]'
latex_formula_bounds = 'x_i \\in [-100, 100], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(0, 0) = 0'
linear = False
modality = False
name = 'Bohachevsky 3 Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.b_func.Booth(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{Booth}}(x) = (x_1 + 2x_2 - 7)^2 + (2x_1 + x_2 - 5)^2'
latex_formula_bounds = 'x_i \\in [-10, 10], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(1, 3) = 0'
linear = False
modality = False
name = 'Booth Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = True
class opfunu.name_based.b_func.BoxBetts(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{BoxBetts}}(x) = \\sum_{i=1}^k g(x_i)^2; g(x) = e^{-0.1i x_1} - e^{-0.1i x_2} - x_3\\left[e^{-0.1i} - e^{-i}\\right]; k=10'
latex_formula_bounds = 'x_1 \\in [0.9, 1.2], x_2 \\in [9, 11.2], x_3 \\in [0.9, 1.2]'
latex_formula_dimension = 'd = 3'
latex_formula_global_optimum = 'f(1, 10, 1) = 0'
linear = False
modality = False
name = 'Box-Betts Quadratic Sum Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.b_func.Branin01(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{Branin01}}(x) = \\left(- 1.275 \\frac{x_1^{2}}{\\pi^{2}} + 5\\frac{x_1}{\\pi} + x_2 -6\\right)^{2} + \\left(10 -\\frac{5}{4 \\pi} \\right) \\cos\\left(x_1\\right) + 10'
latex_formula_bounds = 'x_1 \\in [-5, 10], x_2 \\in [0, 15]'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(x_i) = 0.39788735772973816, x_i = [-\\pi, 12.275]; or [\\pi, 2.275] or x = [3\\pi, 2.475]'
linear = False
modality = False
name = 'Branin RCOS 1 Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.b_func.Branin02(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{Branin02}}(x) = \\left(- 1.275 \\frac{x_1^{2}}{\\pi^{2}} + 5 \\frac{x_1}{\\pi} + x_2 - 6 \\right)^{2} + \\left(10 - \\frac{5}{4 \\pi} \\right) \\cos\\left(x_1\\right) \\cos\\left(x_2\\right) + \\log(x_1^2+x_2^2 + 1) + 10'
latex_formula_bounds = 'x_1 \\in [-5, 15], x_2 \\in [-5, 15]'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(x_i) = 5.559037, x_i = [-3.2, 12.53]'
linear = False
modality = False
name = 'Branin RCOS 2 Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.b_func.Brent(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{Brent}}(x) = (x_1 + 10)^2 + (x_2 + 10)^2 + e^{(-x_1^2 -x_2^2)}'
latex_formula_bounds = 'x_i \\in [-10, 10], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(x_i) = 0, x_i = [-10, -10]'
linear = False
modality = False
name = 'Brent Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = True
class opfunu.name_based.b_func.Brown(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{Brown}}(x) = \\sum_{i=1}^{n-1}\\left[\\left(x_i^2\\right)^{x_{i + 1}^2 + 1} + \\left(x_{i + 1}^2\\right)^{x_i^2 + 1}\\right]'
latex_formula_bounds = 'x_i \\in [-1, 4], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(x_i) = 0, x_i = 0'
linear = False
modality = False
name = 'Brown Function'
parametric = False
randomized_term = False
scalable = True
separable = False
unimodal = True
class opfunu.name_based.b_func.Bukin02(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{Bukin02}}(x) = 100 (x_2^2 - 0.01x_1^2 + 1) + 0.01(x_1 + 10)^2'
latex_formula_bounds = 'x_1 \\in [-15, -5], x_2 \\in [-3, 3]'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(x_i) = -124.75, x_i = [-15, 0]'
linear = False
modality = False
name = 'Bukin 2 Function'
parametric = False
randomized_term = False
scalable = True
separable = False
unimodal = False
class opfunu.name_based.b_func.Bukin04(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = False
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{Bukin04}}(x) = 100 x_2^{2} + 0.01 \\lvert{x_1 + 10}\\rvert'
latex_formula_bounds = 'x_1 \\in [-15, -5], x_2 \\in [-3, 3]'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(x_i) = 0, x_i = [-10, 0]'
linear = False
modality = False
name = 'Bukin 4 Function'
parametric = False
randomized_term = False
scalable = True
separable = True
unimodal = False
class opfunu.name_based.b_func.Bukin06(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = False
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{Bukin06}}(x) = 100 \\sqrt{ \\lvert{x_2 - 0.01 x_1^{2}}\\rvert} + 0.01 \\lvert{x_1 + 10} \\rvert'
latex_formula_bounds = 'x_1 \\in [-15, -5], x_2 \\in [-3, 3]'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(x_i) = 0, x_i = [-10, 1]'
linear = False
modality = False
name = 'Bukin 6 Function'
parametric = False
randomized_term = False
scalable = True
separable = False
unimodal = False

opfunu.name_based.c_func module

class opfunu.name_based.c_func.CamelSixHump(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

[1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = (4 - 2.1x_1^2 + x_1^4/3)x_1^2 + x_1x_2 + (4x_2^2 -4)x_2^2'
latex_formula_bounds = 'x_i \\in [-5, 5], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(-0.0898, 0.7126) = f(0.0898, -0.7126) = -1.0316284229280819'
linear = False
modality = False
name = 'Camel Function Six Hump'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.c_func.CamelThreeHump(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = False
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = 2x_1^2 -1.05x_1^4 + x_1^6/6 + x_1x_2 + x_2^2'
latex_formula_bounds = 'x_i \\in [-5, 5], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(0, 0) = 0'
linear = False
modality = False
name = 'Camel Function Three Hump'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.c_func.ChenBird(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = '
latex_formula_bounds = 'x_i \\in [-500, 500], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(-113.11622344, 227.73244688) = -1000'
linear = False
modality = False
name = 'Chen Bird Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.c_func.ChenV(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = '
latex_formula_bounds = 'x_i \\in [-500, 500], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(±0.70710678, ±0.70710678) = -2000.0039999840005'
linear = False
modality = False
name = 'Chen V Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.c_func.Chichinadze(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = x_{1}^{2} - 12 x_{1} + 8 \\sin\\left(\\frac{5}{2} \\pi x_{1}\\right)+ 10 \\cos\\left(\\frac{1}{2} \\pi x_{1}\\right) + 11 - 0.2 \\frac{\\sqrt{5}}{e^{\\frac{1}{2} \\left(x_{2} -0.5 \\right)^{2}}}'
latex_formula_bounds = 'x_i \\in [-30, 30], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(6.189866586965680, 0.5) = -42.94438701899098'
linear = False
modality = False
name = 'Chichinadze Function'
parametric = False
randomized_term = False
scalable = False
separable = True
unimodal = False
class opfunu.name_based.c_func.ChungReynolds(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = (\\sum_{i=1}^D x_i^2)^2'
latex_formula_bounds = 'x_i \\in [-100, 100], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd \\in \\mathbb{N}_{+}^{*}'
latex_formula_global_optimum = 'f(0,...,0) = 0'
linear = False
modality = False
name = 'Chung Reynolds Function'
parametric = False
randomized_term = False
scalable = True
separable = False
unimodal = True
class opfunu.name_based.c_func.Cigar(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2022

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = x_1^2 + 10^6\\sum_{i=2}^{n} x_i^2'
latex_formula_bounds = 'x_i \\in [-100, 100], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd \\in \\mathbb{N}_{+}^{*}'
latex_formula_global_optimum = 'f(0,...,0) = 0'
linear = False
modality = False
name = 'Cigar Function'
parametric = False
randomized_term = False
scalable = True
separable = True
unimodal = True
class opfunu.name_based.c_func.Cola(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = x_1^2 + 10^6\\sum_{i=2}^{n} x_i^2'
latex_formula_bounds = 'x_0 \\in [0, 4], x_i \\in [-4, 4], \\forall i \\in \\llbracket 1, d-1\\rrbracket'
latex_formula_dimension = 'd = 17'
latex_formula_global_optimum = 'f(0,...,0) = 11.7464'
linear = False
modality = True
name = 'Cola Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.c_func.Colville(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = \\left(x_{1} -1\\right)^{2} + 100 \\left(x_{1}^{2} - x_{2}\\right)^{2} + 10.1 \\left(x_{2} -1\\right)^{2} + \\left(x_{3} -1\\right)^{2} + 90 \\left(x_{3}^{2} - x_{4}\\right)^{2} + 10.1 \\left(x_{4} -1\\right)^{2} + 19.8 \\frac{x_{4} -1}{x_{2}}'
latex_formula_bounds = 'x_i \\in [-10, 10], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 4'
latex_formula_global_optimum = 'f(1,...,1) = 0'
linear = False
modality = False
name = 'Colville Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.c_func.Corana(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = False
convex = False
differentiable = False
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = '
latex_formula_bounds = 'x_i \\in [-5, 5], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 4'
latex_formula_global_optimum = 'f(1,...,1) = 0'
linear = False
modality = False
name = 'Corana Function'
parametric = False
randomized_term = False
scalable = False
separable = True
unimodal = False
class opfunu.name_based.c_func.CosineMixture(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = False
convex = False
differentiable = False
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = -0.1 \\sum_{i=1}^n \\cos(5 \\pi x_i) - \\sum_{i=1}^n x_i^2'
latex_formula_bounds = 'x_i \\in [-1, 1], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(-1,...,-1) = -0.9*D'
linear = False
modality = False
name = 'Cosine Mixture Function'
parametric = False
randomized_term = False
scalable = False
separable = True
unimodal = False
class opfunu.name_based.c_func.CrossInTray(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = False
differentiable = False
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = - 0.0001 \\left(\\left|{e^{\\left|{100- \\frac{\\sqrt{x_{1}^{2} + x_{2}^{2}}}{\\pi}}\\right|} \\sin\\left(x_{1}\\right) \\sin\\left(x_{2}\\right)}\\right| + 1\\right)^{0.1}'
latex_formula_bounds = 'x_i \\in [-10, 10], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(±1.349406608602084, ±1.349406608602084) = -2.062611870822739'
linear = False
modality = False
name = 'Cross-in-Tray Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.c_func.CrossLegTable(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Mishra, S. Global Optimization by Differential Evolution and Particle Swarm Methods:

Evaluation on Some Benchmark Functions Munich University, 2006

continuous = True
convex = False
differentiable = False
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = -\\frac{1}{\\left(\\left|{e^{\\left|{100 - \\frac{\\sqrt{x_{1}^{2} + x_{2}^{2}}}{\\pi}}\\right|}\\sin\\left(x_{1}\\right) \\sin\\left(x_{2}\\right)}\\right| + 1\\right)^{0.1}}'
latex_formula_bounds = 'x_i \\in [-10, 10], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(0, 0) = -1'
linear = False
modality = False
name = 'Cross-Leg-Table Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.c_func.CrownedCross(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Mishra, S. Global Optimization by Differential Evolution and Particle Swarm Methods:

Evaluation on Some Benchmark Functions Munich University, 2006

continuous = True
convex = False
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = 0.0001 \\left(\\left|{e^{\\left|{100 - \\frac{\\sqrt{x_{1}^{2} + x_{2}^{2}}}{\\pi}}\\right|}\\sin\\left(x_{1}\\right) \\sin\\left(x_{2}\\right)}\\right| + 1\\right)^{0.1}'
latex_formula_bounds = 'x_i \\in [-10, 10], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(0, 0) = 0.0001'
linear = False
modality = False
name = 'Crowned Cross Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.c_func.Csendes(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Mishra, S. Global Optimization by Differential Evolution and Particle Swarm Methods:

Evaluation on Some Benchmark Functions Munich University, 2006

continuous = True
convex = False
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = \\sum_{i=1}^n x_i^6 \\left[ 2 + \\sin \\left( \\frac{1}{x_i} \\right ) \\right]'
latex_formula_bounds = 'x_i \\in [-1, 1], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd \\in \\mathbb{N}_{+}^{*}'
latex_formula_global_optimum = 'f(0,..., 0) = 0'
linear = False
modality = False
name = 'Csendes Function'
parametric = False
randomized_term = False
scalable = True
separable = True
unimodal = False
class opfunu.name_based.c_func.Cube(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Mishra, S. Global Optimization by Differential Evolution and Particle Swarm Methods:

Evaluation on Some Benchmark Functions Munich University, 2006

continuous = True
convex = False
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = 100(x_2 - x_1^3)^2 + (1 - x_1)^2'
latex_formula_bounds = 'x_i \\in [-10, 10], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd \\in \\mathbb{N}_{+}^{*}'
latex_formula_global_optimum = 'f(1, 1) = 0'
linear = False
modality = False
name = 'Cube Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False

opfunu.name_based.d_func module

class opfunu.name_based.d_func.Damavandi(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = \\left[ 1 - \\lvert{\\frac{\\sin[\\pi (x_1 - 2)]\\sin[\\pi (x_2 - 2)]}{\\pi^2 (x_1 - 2)(x_2 - 2)}}\\rvert^5 \\right] \\left[2 + (x_1 - 7)^2 + 2(x_2 - 7)^2 \\right]'
latex_formula_bounds = 'x_i \\in [0, 14], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(2, 2) = 0'
linear = False
modality = False
name = 'Damavandi Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.d_func.DeVilliersGlasser01(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = \\sum_{i=1}^{24} \\left[ x_1x_2^{t_i}\\sin(x_3t_i + x_4) - y_i \\right ]^2'
latex_formula_bounds = 'x_i \\in [-500, 500], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 4'
latex_formula_global_optimum = 'f(60.137, 1.371, 3.112, 1.761) = 0'
linear = False
modality = True
name = 'DeVilliers-Glasser 1 Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.d_func.DeVilliersGlasser02(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = \\sum_{i=1}^{24} \\left[ x_1x_2^{t_i}\\tanh \\left [x_3t_i + \\sin(x_4t_i) \\right] \\cos(t_ie^{x_5}) - y_i \\right ]^2'
latex_formula_bounds = 'x_i \\in [-500, 500], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 5'
latex_formula_global_optimum = 'f(53.81, 1.27, 3.012, 2.13, 0.507) = 0'
linear = False
modality = False
name = 'DeVilliers-Glasser 2 Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.d_func.Deb01(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = - \\frac{1}{N} \\sum_{i=1}^n \\sin^6(5 \\pi x_i)'
latex_formula_bounds = 'x_i \\in [-1, 1], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(0.3, -0.3) = -1'
linear = False
modality = True
name = 'Deb 1 Function'
parametric = False
randomized_term = False
scalable = False
separable = True
unimodal = False
class opfunu.name_based.d_func.Deb03(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = - \\frac{1}{N} \\sum_{i=1}^n \\sin^6 \\left[ 5 \\pi\\left ( x_i^{3/4} - 0.05 \\right) \\right ]'
latex_formula_bounds = 'x_i \\in [-1, 1], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(0.3, -0.3) = -1'
linear = False
modality = True
name = 'Deb 3 Function'
parametric = False
randomized_term = False
scalable = False
separable = True
unimodal = False
class opfunu.name_based.d_func.Decanomial(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = 0.001 \\left(\\lvert{x_{2}^{4} + 12 x_{2}^{3}+ 54 x_{2}^{2} + 108 x_{2} + 81.0}\\rvert + \\lvert{x_{1}^{10} - 20 x_{1}^{9} + 180 x_{1}^{8} - 960 x_{1}^{7} + 3360 x_{1}^{6}- 8064 x_{1}^{5} + 13340 x_{1}^{4} - 15360 x_{1}^{3} + 11520 x_{1}^{2} - 5120 x_{1} + 2624.0}\\rvert\\right)^{2}'
latex_formula_bounds = 'x_i \\in [-10, 10], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(2, -3) = 0'
linear = False
modality = False
name = 'Decanomial Function'
parametric = False
randomized_term = False
scalable = False
separable = True
unimodal = False
class opfunu.name_based.d_func.Deceptive(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015

continuous = True
convex = False
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = - \\left [\\frac{1}{n} \\sum_{i=1}^{n} g_i(x_i) \\right ]^{\\beta}'
latex_formula_bounds = 'x_i \\in [0, 1], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd \\in N^+'
latex_formula_global_optimum = 'f(\\alpha_i) = -1'
linear = False
modality = False
name = 'Deceptive Function'
parametric = False
randomized_term = False
scalable = True
separable = False
unimodal = False
class opfunu.name_based.d_func.DeckkersAarts(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = False
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = 10^5x_1^2 + x_2^2 - (x_1^2 + x_2^2)^2 + 10^{-5}(x_1^2 + x_2^2)^4'
latex_formula_bounds = 'x_i \\in [-20, 20], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(0, \\pm 14.9451209) = -24776.518242168'
linear = False
modality = False
name = 'Deckkers-Aarts Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.d_func.DeflectedCorrugatedSpring(ndim=None, bounds=None, alpha=5.0)[source]

Bases: opfunu.benchmark.Benchmark

1

Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = 0.1\\sum_{i=1}^n \\left[ (x_i - \\alpha)^2 - \\cos \\left( K \\sqrt {\\sum_{i=1}^n (x_i - \\alpha)^2}\\right ) \\right ]'
latex_formula_bounds = 'x_i \\in [0, 2\\alpha], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(x_i) = f(alpha_i) = -1'
linear = False
modality = True
name = 'Deflected Corrugated Spring Function'
parametric = False
randomized_term = False
scalable = True
separable = False
unimodal = False
class opfunu.name_based.d_func.DixonPrice(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = (x_1 - 1)^2 + \\sum_{i=2}^n i(2x_i^2 - x_{i-1})^2'
latex_formula_bounds = 'x_i \\in [-10, 10], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd \\in N^+'
latex_formula_global_optimum = 'f(2^{- \\frac{(2^i - 2)}{2^i}}) = 0'
linear = False
modality = False
name = 'Dixon & Price Function'
parametric = False
randomized_term = False
scalable = True
separable = False
unimodal = True
class opfunu.name_based.d_func.Dolan(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = \\lvert (x_1 + 1.7 x_2)\\sin(x_1) - 1.5 x_3 - 0.1 x_4\\cos(x_5 + x_5 - x_1) + 0.2 x_5^2 - x_2 - 1 \\rvert'
latex_formula_bounds = 'x_i \\in [-100, 100], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 5'
latex_formula_global_optimum = 'f(8.39045925, 4.81424707, 7.34574133, 68.88246895, 3.85470806) = 0'
linear = False
modality = False
name = 'Dolan Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.d_func.DropWave(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = - \\frac{1 + \\cos\\left(12 \\sqrt{\\sum_{i=1}^{n} x_i^{2}}\\right)}{2 + 0.5 \\sum_{i=1}^{n} x_i^{2}}'
latex_formula_bounds = 'x_i \\in [-5.12, 5.12], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(0, 0) = -1'
linear = False
modality = False
name = 'DropWave Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False

opfunu.name_based.e_func module

class opfunu.name_based.e_func.Easom(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = a - \\frac{a}{e^{b \\sqrt{\\frac{\\sum_{i=1}^{n}x_i^{2}}{n}}}} + e - e^{\\frac{\\sum_{i=1}^{n} \\cos\\left(c x_i\\right)} {n}}'
latex_formula_bounds = 'x_i \\in [-100, 100], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(\\pi, \\pi) = -1'
linear = False
modality = False
name = 'Easom Function'
parametric = False
randomized_term = False
scalable = False
separable = True
unimodal = False
class opfunu.name_based.e_func.Eckerle4(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

[1] Eckerle, K., NIST (1979). Circular Interference Transmittance Study. [2] https://www.itl.nist.gov/div898/strd/nls/data/eckerle4.shtml

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = '
latex_formula_bounds = '0 <= x_1 <=20, 1 <= x_2 <= 20, 10 <= x_3 <= 600'
latex_formula_dimension = 'd = 3'
latex_formula_global_optimum = 'f(1.5543827178, 4.0888321754, 4.5154121844e2) = 1.4635887487E-03'
linear = False
modality = False
name = 'Eckerle 4 Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.e_func.EggCrate(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = x_1^2 + x_2^2 + 25 \\left[ \\sin^2(x_1) + \\sin^2(x_2) \\right]'
latex_formula_bounds = 'x_i \\in [-5, 5], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(0, 0) = 0'
linear = False
modality = False
name = 'Egg Crate Function'
parametric = False
randomized_term = False
scalable = False
separable = True
unimodal = False
class opfunu.name_based.e_func.EggHolder(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = \\sum_{1}^{n - 1}\\left[-\\left(x_{i + 1}+ 47 \\right ) \\sin\\sqrt{\\lvert x_{i+1} + x_i/2 + 47 \\rvert} - x_i \\sin\\sqrt{\\lvert x_i - (x_{i + 1} + 47)\\rvert}\\right ]'
latex_formula_bounds = 'x_i \\in [-512, 512], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd \\in N^+'
latex_formula_global_optimum = 'f(512, 404.2319) = -959.640662711'
linear = False
modality = False
name = 'Egg Holder Function'
parametric = False
randomized_term = False
scalable = True
separable = False
unimodal = False
class opfunu.name_based.e_func.ElAttarVidyasagarDutta(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = (x_1^2 + x_2 - 10)^2 + (x_1 + x_2^2 - 7)^2 + (x_1^2 + x_2^3 - 1)^2'
latex_formula_bounds = 'x_i \\in [-500, 500], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(3.40918683, -2.17143304) = 1.712780354'
linear = False
modality = False
name = 'El-Attar-Vidyasagar-Dutta Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = True
class opfunu.name_based.e_func.Exp2(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = \\sum_{i=0}^9 \\left ( e^{-ix_1/10} - 5e^{-ix_2/10} - e^{-i/10} + 5e^{-i} \\right )^2'
latex_formula_bounds = 'x_i \\in [0, 20], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(1, 10) = 0'
linear = False
modality = False
name = 'Exp 2 Function'
parametric = False
randomized_term = False
scalable = False
separable = True
unimodal = False
class opfunu.name_based.e_func.Exponential(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = -e^{-0.5 \\sum_{i=1}^n x_i^2}'
latex_formula_bounds = 'x_i \\in [-1, 1], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd \\in N^+'
latex_formula_global_optimum = 'f(0,..,0) = -1'
linear = False
modality = False
name = 'Exponential Function'
parametric = False
randomized_term = False
scalable = True
separable = False
unimodal = False

opfunu.name_based.f_func module

class opfunu.name_based.f_func.FreudensteinRoth(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = \\left\\{x_1 - 13 + \\left[(5 - x_2) x_2- 2 \\right] x_2 \\right\\}^2 + \\left \\{x_1 - 29 + \\left[(x_2 + 1) x_2 - 14 \\right] x_2 \\right\\}^2'
latex_formula_bounds = 'x_i \\in [-10, 10], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(5, 4) = 0'
linear = False
modality = False
name = 'Freudenstein Roth Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False

opfunu.name_based.g_func module

class opfunu.name_based.g_func.Gear(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = \\left \\{ \\frac{1.0}{6.931}- \\frac{\\lfloor x_1\\rfloor \\lfloor x_2 \\rfloor } {\\lfloor x_3 \\rfloor \\lfloor x_4 \\rfloor } \\right\\}^2'
latex_formula_bounds = 'x_i \\in [12, 60], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 4'
latex_formula_global_optimum = 'f(16, 19, 43, 49) = 2.7 \\cdot 10^{-12}'
linear = False
modality = False
name = 'Gear Problem'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.g_func.Giunta(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = 0.6 + \\sum_{i=1}^{n} \\left[\\sin^{2}\\left(1- \\frac{16}{15} x_i\\right) - \\frac{1}{50} \\sin\\left(4 - \\frac{64}{15} x_i\\right) - \\sin\\left(1 - \\frac{16}{15} x_i\\right)\\right]'
latex_formula_bounds = 'x_i \\in [-1, 1], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f([0.4673200277395354, 0.4673200169591304]) = 0.06447042053690566'
linear = False
modality = False
name = 'Giunta Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.g_func.GoldsteinPrice(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = \\left[ 1 + (x_1 + x_2 + 1)^2 (19 - 14 x_1 + 3 x_1^2 - 14 x_2 + 6 x_1 x_2 + 3 x_2^2) \\right] \\left[ 30 + ( 2x_1 - 3 x_2)^2 (18 - 32 x_1 + 12 x_1^2 + 48 x_2 - 36 x_1 x_2 + 27 x_2^2) \\right]'
latex_formula_bounds = 'x_i \\in [-2, 2], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f([0, -1]) = 3'
linear = False
modality = False
name = 'Goldstein Price Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.g_func.Griewank(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = \\frac{1}{4000}\\sum_{i=1}^n x_i^2 - \\prod_{i=1}^n\\cos\\left(\\frac{x_i}{\\sqrt{i}}\\right) + 1'
latex_formula_bounds = 'x_i \\in [-100, 100], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd \\in N^+'
latex_formula_global_optimum = 'f(0,...,0) = 0'
linear = False
modality = False
name = 'Griewank Function'
parametric = False
randomized_term = False
scalable = True
separable = False
unimodal = False
class opfunu.name_based.g_func.Gulf(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = \\sum_{i=1}^{99} \\left( e^{-\\frac{\\lvert y_i - x_2 \\rvert^{x_3}}{x_1}}  - t_i \\right)'
latex_formula_bounds = 'x_i \\in [0, 60], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 3'
latex_formula_global_optimum = 'f(50, 25, 1.5) = 0'
linear = False
modality = False
name = 'Gulf Research Problem'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False

opfunu.name_based.h_func module

class opfunu.name_based.h_func.Hansen(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = \\left[ \\sum_{i=0}^4(i+1)\\cos(ix_1+i+1)\\right ]\\left[\\sum_{j=0}^4(j+1)\\cos[(j+2)x_2+j+1]\\right ]'
latex_formula_bounds = 'x_i \\in [-10, 10], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(-7.58989583, -7.70831466) = -176.54179'
linear = False
modality = False
name = 'Hansen Function'
parametric = False
randomized_term = False
scalable = False
separable = True
unimodal = False
class opfunu.name_based.h_func.Hartmann3(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = -\\sum\\limits_{i=1}^{4} c_i e^{-\\sum\\limits_{j=1}^{n}a_{ij}(x_j - p_{ij})^2}'
latex_formula_bounds = 'x_i \\in [0, 1], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 3'
latex_formula_global_optimum = 'f([0.11461292,  0.55564907,  0.85254697]) = -3.8627821478'
linear = False
modality = False
name = 'Hartman 3 Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.h_func.Hartmann6(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = -\\sum\\limits_{i=1}^{4} c_i e^{-\\sum\\limits_{j=1}^{n}a_{ij}(x_j - p_{ij})^2}'
latex_formula_bounds = 'x_i \\in [0, 1], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 6'
latex_formula_global_optimum = 'f([0.20168952, 0.15001069, 0.47687398, 0.27533243, 0.31165162, 0.65730054]) = -3.32236801141551'
linear = False
modality = False
name = 'Hartman 6 Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.h_func.HelicalValley(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = 100{[z-10\\Psi(x_1,x_2)]^2 +(\\sqrt{x_1^2+x_2^2}-1)^2}+x_3^2'
latex_formula_bounds = 'x_i \\in [-10, 10], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd \\in N^+'
latex_formula_global_optimum = 'f([1.0, 0.0, 0.0]) = 0'
linear = False
modality = False
name = 'Helical Valley'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.h_func.Himmelblau(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = (x_1^2 + x_2 - 11)^2 + (x_1 + x_2^2 - 7)^2'
latex_formula_bounds = 'x_i \\in [-5, 5], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd \\in N^+'
latex_formula_global_optimum = 'f([3, 2]) = 0'
linear = False
modality = False
name = 'Himmelblau Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.h_func.HolderTable(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = - \\left|{e^{\\left|{1- \\frac{\\sqrt{x_{1}^{2} + x_{2}^{2}}}{\\pi} }\\right|} \\sin\\left(x_{1}\\right) \\cos\\left(x_{2}\\right)}\\right|'
latex_formula_bounds = 'x_i \\in [-10, 10], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(\\pm 8.05502347, \\pm 9.66459002) = -19.20850256788675'
linear = False
modality = False
name = 'Holder Table Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.h_func.Hosaki(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = \\left ( 1 - 8 x_1 + 7 x_1^2 - \\frac{7}{3} x_1^3 + \\frac{1}{4} x_1^4 \\right ) x_2^2 e^{-x_1}'
latex_formula_bounds = ' 0 <= x_1 <= 5, 0 <= x_2 <= 6'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(4, 2) = -2.3458'
linear = False
modality = False
name = 'Hosaki Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False

opfunu.name_based.i_func module

class opfunu.name_based.i_func.Infinity(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = \\sum_{i=1}^{n} x_i^{6} \\left [ \\sin\\left ( \\frac{1}{x_i} \\right ) + 2 \\right ]'
latex_formula_bounds = 'x_i \\in [-1, 1], \\forall i \\in \\llbracket 1, d\\rrbracket'
latex_formula_dimension = 'd \\in N^+'
latex_formula_global_optimum = 'f(0,..,0) = 0'
linear = False
modality = False
name = 'Infinity Function'
parametric = False
randomized_term = False
scalable = True
separable = True
unimodal = False

opfunu.name_based.j_func module

class opfunu.name_based.j_func.JennrichSampson(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{JennrichSampson}}(x) = \\sum_{i=1}^{10} \\left [2 + 2i - (e^{ix_1} + e^{ix_2}) \\right ]^2'
latex_formula_bounds = 'x_i \\in [-1, 1]'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(0.257825, 0.257825) = 124.3621824'
linear = False
modality = False
name = 'Jennrich-Sampson Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.j_func.Judge(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015

\[f_{\text{Judge}}(x) = \sum_{i=1}^{20} \left [ \left (x_1 + A_i x_2 + B x_2^2 \right ) - C_i \right ]^2\]

\[\begin{cases} C = [4.284, 4.149, 3.877, 0.533, 2.211, 2.389, 2.145, 3.231, 1.998, 1.379, 2.106, 1.428, 1.011, 2.179, 2.858, 1.388, 1.651, 1.593, 1.046, 2.152] \\ A = [0.286, 0.973, 0.384, 0.276, 0.973, 0.543, 0.957, 0.948, 0.543, 0.797, 0.936, 0.889, 0.006, 0.828, 0.399, 0.617, 0.939, 0.784, 0.072, 0.889] \\ B = [0.645, 0.585, 0.310, 0.058, 0.455, 0.779, 0.259, 0.202, 0.028, 0.099, 0.142, 0.296, 0.175, 0.180, 0.842, 0.039, 0.103, 0.620, 0.158, 0.704] \end{cases}\]

with \(x_i \in [-10, 10]\) for \(i = 1, 2\). Global optimum: \(f(x_i) = 16.0817307\) for \(\mathbf{x} = [0.86479, 1.2357]\).

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{Judge}}(x) = \\sum_{i=1}^{20} \\left [ \\left (x_1 + A_i x_2 + B x_2^2 \\right ) - C_i \\right ]^2'
latex_formula_bounds = 'x_i \\in [-10, 10]'
latex_formula_dimension = 'd \\in N^+'
latex_formula_global_optimum = 'f(0.86479, 1.2357) = 16.0817307'
linear = False
modality = False
name = 'Judge Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False

opfunu.name_based.k_func module

class opfunu.name_based.k_func.Katsuura(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Adorio, E. MVF - “Multivariate Test Functions Library in C for

Unconstrained Global Optimization”, 2005 [2] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015

\[f_{\text{Katsuura}}(x) = \prod_{i=0}^{n-1} \left [ 1 + (i+1) \sum_{k=1}^{d} \lfloor (2^k x_i) \rfloor 2^{-k} \right ]\]

Where, in this exercise, \(d = 32\). Here, \(n\) represents the number of dimensions and

\(x_i \in [0, 100]\) for \(i = 1, ..., n\).

Global optimum: \(f(x) = 1\) for \(x_i = 0\) for \(i = 1, ..., n\).

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{Katsuura}}(x) = \\prod_{i=0}^{n-1} \\left [ 1 + (i+1) \\sum_{k=1}^{d} \\lfloor (2^k x_i) \\rfloor 2^{-k} \\right ]'
latex_formula_bounds = 'x_i \\in [0, 100]'
latex_formula_dimension = 'd = 32'
latex_formula_global_optimum = 'f(0., 0., ..., 0.) = 1.'
linear = False
modality = False
name = 'Katsuura Function'
parametric = False
randomized_term = False
scalable = True
separable = False
unimodal = False
class opfunu.name_based.k_func.Keane(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

\[f_{\text{Keane}}(x) = \frac{\sin^2(x_1 - x_2)\sin^2(x_1 + x_2)}{\sqrt{x_1^2 + x_2^2}}\]

with \(x_i \in [0, 10]\) for \(i = 1, 2\).

Global optimum: \(f(x) = 0.0\) for \(x = [7.85396153, 7.85396135]\).

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{Keane}}(x) = \\frac{\\sin^2(x_1 - x_2)\\sin^2(x_1 + x_2)} {\\sqrt{x_1^2 + x_2^2}}'
latex_formula_bounds = 'x_i \\in [0, 10]'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(7.85396153, 7.85396135) = 0.'
linear = False
modality = True
name = 'Keane Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.k_func.Kowalik(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

\[f_{\text{Kowalik}}(x) = \sum_{i=0}^{10} \left [ a_i - \frac{x_1 (b_i^2 + b_i x_2)} {b_i^2 + b_i x_3 + x_4} \right ]^2\]

\[\begin{matrix} a = [4, 2, 1, 1/2, 1/4, 1/6, 1/8, 1/10, 1/12, 1/14, 1/16] \\ b = [0.1957, 0.1947, 0.1735, 0.1600, 0.0844, 0.0627, 0.0456, 0.0342, 0.0323, 0.0235, 0.0246] \end{matrix}\]

Here, \(n\) represents the number of dimensions and \(x_i \in [-5, 5]\) for \(i = 1, ..., 4\).

Global optimum: \(f(x) = 0.00030748610\) for \(x = [0.192833, 0.190836, 0.123117, 0.135766]\).

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{Kowalik}}(x) = \\sum_{i=0}^{10} \\left [ a_i - \\frac{x_1 (b_i^2 + b_i x_2)} {b_i^2 + b_i x_3 + x_4} \\right ]^2'
latex_formula_bounds = 'x_i \\in [-5, 5]'
latex_formula_dimension = 'd = 4'
latex_formula_global_optimum = 'f(0.192833, 0.190836, 0.123117, 0.135766) = 0.00030748610'
linear = False
modality = False
name = 'Kowalik Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False

opfunu.name_based.l_func module

class opfunu.name_based.l_func.Langermann(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015

\[f_{\text{Langermann}}(x) = - \sum_{i=1}^{5} \frac{c_i \cos\left\{\pi \left[\left(x_{1} - a_i\right)^{2} + \left(x_{2} - b_i\right)^{2}\right]\right\}}{e^{\frac{\left(x_{1} - a_i\right)^{2} + \left(x_{2} - b_i\right)^{2}}{\pi}}}\]

Where:

\[\begin{matrix} a = [3, 5, 2, 1, 7] \\ b = [5, 2, 1, 4, 9] \\ c = [1, 2, 5, 2, 3] \end{matrix}\]

Here \(x_i \in [0, 10]\) for \(i = 1, 2\). Global optimum: \(f(x) = -5.1621259\)

continuous = True
convex = False
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{Langermann}}(x) = - \\sum_{i=1}^{5} \\frac{c_i \\cos\\left\\{\\pi \\left[\\left(x_{1}- a_i\\right)^{2}  + \\left(x_{2} - b_i \\right)^{2}\\right]\\right\\}}{e^{\\frac{\\left( x_{1} - a_i\\right)^{2} + \\left( x_{2} - b_i\\right)^{2}}{\\pi}}}'
latex_formula_bounds = 'x_i \\in [0, 10]'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(2.00299219, 1.006096) = -5.1621259'
linear = False
modality = True
name = 'Langermann Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.l_func.LennardJones(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

http://www-wales.ch.cam.ac.uk/~jon/structures/LJ/tables.150.html

\[f_{\text{LennardJones}}(\mathbf{x}) = \sum_{i=0}^{n-2}\sum_{j>i}^{n-1} \left( \frac{1}{r_{ij}^{12}} - \frac{1}{r_{ij}^{6}} \right)\]

Where, in this exercise:

\[r_{ij} = \sqrt{(x_{3i}-x_{3j})^2 + (x_{3i+1}-x_{3j+1})^2 + (x_{3i+2}-x_{3j+2})^2}\]

Valid for any dimension, \(n = 3*k, k=2 , 3, 4, ..., 20\). \(k\) is the number of atoms in 3-D space constraints: unconstrained type: multi-modal with one global minimum; non-separable Value-to-reach: \(minima[k-2] + 0.0001\). Here, \(n\) represents the number of dimensions and \(x_i \in [-4, 4]\) for \(i = 1 ,..., n\). Global optimum: .. math:

\text{minima} = [-1.,-3.,-6.,-9.103852,-12.712062,-16.505384,\
         -19.821489, -24.113360, -28.422532,-32.765970,\
         -37.967600,-44.326801, -47.845157,-52.322627,\
         -56.815742,-61.317995, -66.530949, -72.659782,\
         -77.1777043]\
continuous = True
convex = False
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{LennardJones}}(\\mathbf{x}) = \\sum_{i=0}^{n-2}\\sum_{j>1}^{n-1}\\frac{1}{r_{ij}^{12}} - \\frac{1}{r_{ij}^{6}}'
latex_formula_bounds = 'x_i \\in [-4, 4]'
latex_formula_dimension = 'd \\in [6:60]'
latex_formula_global_optimum = 'f = [-1.,-3.,-6.,-9.103852,-12.712062,-16.505384, -19.821489, -24.113360, -28.422532,-32.765970, -37.967600,-44.326801, -47.845157,-52.322627, -56.815742,-61.317995, -66.530949, -72.659782, -77.1777043]'
linear = False
modality = True
name = 'LennardJones Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.l_func.Leon(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

\[f_{\text{Leon}}(\mathbf{x}) = \left(1 - x_{1}\right)^{2} + 100 \left(x_{2} - x_{1}^{2}\right)^{2}\]

with \(x_i \in [-1.2, 1.2]\) for \(i = 1, 2\). Global optimum: \(f(x) = 0\) for \(x = [1, 1]\)

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{Leon}}(\\mathbf{x}) = \\left(1 - x_{1}\\right)^{2} + 100 \\left(x_{2} - x_{1}^{2} \\right)^{2}'
latex_formula_bounds = 'x_i \\in [-1.2, 1.2]'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(1, 1) = 0'
linear = False
modality = False
name = 'Leon Function'
parametric = False
randomized_term = False
scalable = False
separable = True
unimodal = True
class opfunu.name_based.l_func.Levy03(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Mishra, S. Global Optimization by Differential Evolution and Particle Swarm Methods: Evaluation

on Some Benchmark Functions. Munich Personal RePEc Archive, 2006, 1005

\[f_{\text{Levy03}}(\mathbf{x}) = \sin^2(\pi y_1)+\sum_{i=1}^{n-1}(y_i-1)^2[1+10\sin^2(\pi y_{i+1})]+(y_n-1)^2\]

\[y_i = 1 + \frac{x_i - 1}{4}\]

Here, \(n\) represents the number of dimensions and \(x_i \in [-10, 10]\) for \(i=1,...,n\). Global optimum: \(f(x_i) = 0\) for \(x_i = 1\) for \(i=1,...,n\)

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{Levy03}}(\\mathbf{x}) = \\sin^2(\\pi y_1)+\\sum_{i=1}^{n-1}(y_i-1)^2[1+10\\sin^2(\\pi y_{i+1})]+(y_n-1)^2'
latex_formula_bounds = 'x_i \\in [-10, 10]'
latex_formula_dimension = 'd \\in N^+'
latex_formula_global_optimum = 'f(1,... 1) = 0'
linear = False
modality = True
name = 'Levy 3 Function'
parametric = False
randomized_term = False
scalable = True
separable = False
unimodal = False
class opfunu.name_based.l_func.Levy05(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Mishra, S. Global Optimization by Differential Evolution and Particle Swarm Methods: Evaluation

on Some Benchmark Functions. Munich Personal RePEc Archive, 2006, 1005

\[f_{\text{Levy05}}(\mathbf{x}) = \sum_{i=1}^{5} i \cos \left[(i-1)x_1 + i \right] \times \sum_{j=1}^{5} j \cos \left[(j+1)x_2 + j \right] + (x_1 + 1.42513)^2 + (x_2 + 0.80032)^2\]

Here, \(n\) represents the number of dimensions and \(x_i \in [-10, 10]\) for \(i=1,...,n\). Global optimum: \(f(x_i) = -176.1375779\) for \(\mathbf{x} = [-1.30685, -1.42485]\).

continuous = True
convex = False
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(\\mathbf{x}) = \\sum_{i=1}^{5} i \\cos \\left[(i-1)x_1 + i \\right] \\times \\sum_{j=1}^{5} j \\cos \\left[(j+1)x_2 + j \\right] + (x_1 + 1.42513)^2 + (x_2 + 0.80032)^2'
latex_formula_bounds = 'x_i \\in [-10, 10]'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(-1.30685, -1.42485) = -176.1375779'
linear = False
modality = True
name = 'Levy 5 Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.l_func.Levy13(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Mishra, S. Global Optimization by Differential Evolution and Particle Swarm Methods: Evaluation

on Some Benchmark Functions. Munich Personal RePEc Archive, 2006, 1005

\[f_{\text{Levy13}}(x) = \left(x_{1} - 1\right)^{2} \left[\sin^{2}\left(3 \pi x_{2}\right) + 1\right] + \left(x_{2} - 1\right)^{2} \left[\sin^{2}\left(2 \pi x_{2}\right) + 1\right] + \sin^{2}\left(3 \pi x_{1}\right)\]

with \(x_i \in [-10, 10]\) for \(i = 1, 2\). Global optimum: \(f(x) = 0\) for \(x = [1, 1]\)

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{Levy13}}(x) = \\left(x_{1} -1\\right)^{2} \\left[\\sin^{2}\\left(3 \\pi x_{2}\\right) + 1\\right] + \\left(x_{2} - 1\\right)^{2} \\left[\\sin^{2}\\left(2 \\pi x_{2}\\right)+ 1\\right] + \\sin^{2}\\left(3 \\pi x_{1}\\right)'
latex_formula_bounds = 'x_i \\in [-10, 10]'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(1., 1.) = 0'
linear = False
modality = True
name = 'Levy 13 Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False

opfunu.name_based.m_func module

class opfunu.name_based.m_func.Matyas(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

\[f_{\text{Matyas}}(x) = 0.26(x_1^2 + x_2^2) - 0.48 x_1 x_2\]

Here \(x_i \in [-10, 10]\) for \(i = 1, 2\). Global optimum: \(f(x) = 0.0\)

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{Matyas}}(x) = 0.26(x_1^2 + x_2^2) - 0.48 x_1 x_2'
latex_formula_bounds = 'x_i \\in [-10, 10]'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(0, 0) = 0'
linear = False
modality = False
name = 'Matyas Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = True
class opfunu.name_based.m_func.McCormick(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

\[f(x) = - x_{1} + 2 x_{2} + \left(x_{1} - x_{2}\right)^{2} + \sin\left(x_{1} + x_{2}\right) + 1\]

Here \(x_1 \in [-1.5, 4], x_2 \in [-3, 4]\) . Global optimum: \(f(x) = -1.913222954981037\)

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = - x_{1} + 2 x_{2} + \\left(x_{1} - x_{2}\\right)^{2} + \\sin\\left(x_{1} + x_{2}\\right) + 1'
latex_formula_bounds = 'x_1 \\in [-1.5, 4], x_2 \\in [-3, 4]'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(-0.5471975602214493, -1.547197559268372) = -1.913222954981037'
linear = False
modality = False
name = 'McCormick Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.m_func.Meyer(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

https://www.itl.nist.gov/div898/strd/nls/data/mgh10.shtml

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x)'
latex_formula_bounds = 'x_1 \\in [0, 1], x_2 \\in [100, 1000], x_3 \\in [100, 500]'
latex_formula_dimension = 'd = 3'
latex_formula_global_optimum = 'f(5.6096364710e-3, 6.1813463463e2, 3.4522363462e2) = 8.7945855171e1'
linear = False
modality = False
name = 'Meyer Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.m_func.Michalewicz(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Adorio, E. MVF - “Multivariate Test Functions Library in C for

Unconstrained Global Optimization”, 2005

\[f(x) = - \sum_{i=1}^{2} \sin\left(x_i\right) \sin^{2 m}\left(\frac{i x_i^{2}}{\pi}\right)\]

Here \(x_i \in [0, \pi]\). Global optimum: \(f(x) = -1.8013\)

continuous = True
convex = False
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = - \\sum_{i=1}^{2} \\sin\\left(x_i\\right) \\sin^{2 m}\\left(\\frac{i x_i^{2}}{\\pi}\\right)'
latex_formula_bounds = 'x_i \\in [0, \\pi]`'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(2.20290552, 1.57079633) = -1.8013'
linear = False
modality = False
name = 'Michalewicz Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.m_func.MieleCantrell(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

\[f(x) = (e^{-x_1} - x_2)^4 + 100(x_2 - x_3)^6 + \tan^4(x_3 - x_4) + x_1^8\]

Here \(x_i \in [-1, 1] for i \in [1, 4]\). Global optimum: \(f(x) = 0\)

continuous = True
convex = False
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = (e^{-x_1} - x_2)^4 + 100(x_2 - x_3)^6 + \\tan^4(x_3 - x_4) + x_1^8'
latex_formula_bounds = 'x_i \\in [-1, 1] for i \\in [1, 4]'
latex_formula_dimension = 'd = 4'
latex_formula_global_optimum = 'f(0, 1, 1, 1) = 0'
linear = False
modality = False
name = 'Miele Cantrell Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.m_func.Mishra01(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

\[f(x) = (1 + x_n)^{x_n}; \quad x_n = n - \sum_{i=1}^{n-1} x_i\]

Here \(x_i \in [0, 1] for i \in [1, n]\). Global optimum: \(f(x) = 2\)

continuous = True
convex = False
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = (1 + x_n)^{x_n}; x_n = n - \\sum_{i=1}^{n-1} x_i'
latex_formula_bounds = 'x_i \\in [0, 1] for i \\in [1, n]'
latex_formula_dimension = 'd = n'
latex_formula_global_optimum = 'f(1) = 2'
linear = False
modality = False
name = 'Mishra 1 Function'
parametric = False
randomized_term = False
scalable = True
separable = False
unimodal = False
class opfunu.name_based.m_func.Mishra02(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

\[f(x) = (1 + x_n)^{x_n}; \quad x_n = n - \sum_{i=1}^{n-1} \frac{(x_i + x_{i+1})}{2}\]

Here \(x_i \in [0, 1] for i \in [1, n]\). Global optimum: \(f(x) = 2\)

continuous = True
convex = False
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = (1 + x_n)^{x_n}; x_n = n - \\sum_{i=1}^{n-1} \\frac{(x_i + x_{i+1})}{2}'
latex_formula_bounds = 'x_i \\in [0, 1] for i \\in [1, n]'
latex_formula_dimension = 'd = n'
latex_formula_global_optimum = 'f(1) = 2'
linear = False
modality = False
name = 'Mishra 2 Function'
parametric = False
randomized_term = False
scalable = True
separable = False
unimodal = False
class opfunu.name_based.m_func.Mishra03(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

\[f(x) = \sqrt{\lvert \cos{\sqrt{\lvert x_1^2 + x_2^2 \rvert}} \rvert} + 0.01(x_1 + x_2)\]

Here \(x_i \in [-10, 10]\) for \(i \in [1, 2]\). Global optimum: \(f(-9.99378322, -9.99918927) = -0.19990562\)

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = \\sqrt{\\lvert \\cos{\\sqrt{\\lvert x_1^2 + x_2^2 \\rvert}} \\rvert} + 0.01(x_1 + x_2)'
latex_formula_bounds = 'x_i \\in [-10, 10] for i \\in [1, 2]'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(-9.99378322, -9.99918927) = -0.19990562'
linear = False
modality = False
name = 'Mishra 3 Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.m_func.Mishra04(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

\[f(x) = \sqrt{\lvert \sin{\sqrt{\lvert x_1^2 + x_2^2 \rvert}} \rvert} + 0.01(x_1 + x_2)\]

Here \(x_i \in [-10, 10] for i \in [1, n]\). Global optimum: \(f(-8.71499636, -9.0533148) = -0.17767\)

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = \\sqrt{\\lvert \\sin{\\sqrt{\\lvert x_1^2 + x_2^2 \\rvert}} \\rvert} + 0.01(x_1 + x_2)'
latex_formula_bounds = 'x_i \\in [-10, 10] for i \\in [1, 2]'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(-8.71499636, -9.0533148) = -0.17767'
linear = False
modality = False
name = 'Mishra 4 Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.m_func.Mishra05(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

\[f(x) = \left [ \sin^2 ((\cos(x_1) + \cos(x_2))^2) + \cos^2 ((\sin(x_1) + \sin(x_2))^2) + x_1 \right ]^2 + 0.01(x_1 + x_2)\]

Here \(x_i \in [-10, 10] for i \in [1, 2]\). Global optimum: \(f(-1.98682, -10) = -1.019829519930646\)

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = \\left [ \\sin^2 ((\\cos(x_1) + \\cos(x_2))^2) + \\cos^2 ((\\sin(x_1) + \\sin(x_2))^2) + x_1 \\right ]^2 + 0.01(x_1 + x_2)'
latex_formula_bounds = 'x_i \\in [-10, 10] for i \\in [1, 2]'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(-1.98682, -10) = -1.019829519930646'
linear = False
modality = False
name = 'Mishra 5 Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.m_func.Mishra06(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

\[f(x) = -\log{\left [ \sin^2 ((\cos(x_1) + \cos(x_2))^2) - \cos^2 ((\sin(x_1) + \sin(x_2))^2) + x_1 \right ]^2} + 0.01 \left[(x_1 -1)^2 + (x_2 - 1)^2 \right]\]

Here \(x_i \in [-10, 10] for i \in [1, 2]\). Global optimum: \(f(2.88631, 1.82326) = -2.28395\)

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = -\\log{\\left [ \\sin^2 ((\\cos(x_1) + \\cos(x_2))^2) - \\cos^2 ((\\sin(x_1) + \\sin(x_2))^2) + x_1 \\right ]^2} + 0.01 \\left[(x_1 -1)^2 + (x_2 - 1)^2 \\right]'
latex_formula_bounds = 'x_i \\in [-10, 10] for i \\in [1, 2]'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(2.88631, 1.82326) = -2.28395'
linear = False
modality = False
name = 'Mishra 6 Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.m_func.Mishra07(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

\[f(x) = \left [\prod_{i=1}^{n} x_i - n! \right]^2\]

Here \(x_i \in [-10, 10]\) for \(i \in [1, n]\). Global optimum: \(f(\sqrt{n}) = 0\)

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = \\left [\\prod_{i=1}^{n} x_i - n! \\right]^2'
latex_formula_bounds = 'x_i \\in [-10, 10] \\forall i \\in [1, n]'
latex_formula_dimension = 'd = n'
latex_formula_global_optimum = 'f(\\sqrt{n}) = 0'
linear = False
modality = False
name = 'Mishra 7 Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.m_func.Mishra08(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

\[f(x) = 0.001 \left[\lvert x_1^{10} - 20x_1^9 + 180x_1^8 - 960 x_1^7 + 3360x_1^6 - 8064x_1^5 + 13340x_1^4 - 15360x_1^3 + 11520x_1^2 - 5120x_1 + 2624 \rvert \lvert x_2^4 + 12x_2^3 + 54x_2^2 + 108x_2 + 81 \rvert \right]^2\]

Here \(x_i \in [-10, 10]\) for \(i \in [1, 2]\). Global optimum: \(f(2, -3) = 0\)

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = 0.001 \\left[\\lvert x_1^{10} - 20x_1^9 + 180x_1^8 - 960 x_1^7 + 3360x_1^6 - 8064x_1^5 + 13340x_1^4 - 15360x_1^3 + 11520x_1^2 - 5120x_1 + 2624 \\rvert \\lvert x_2^4 + 12x_2^3 + 54x_2^2 + 108x_2 + 81 \\rvert \\right]^2'
latex_formula_bounds = 'x_i \\in [-10, 10] \\forall i \\in [1, 2]'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(2, -3) = 0'
linear = False
modality = False
name = 'Mishra 8 Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.m_func.Mishra09(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

\[f(x) = \left[ ab^2c + abc^2 + b^2 + (x_1 + x_2 - x_3)^2 \right]^2\]

Where, in this exercise:

\[\begin{cases} a = 2x_1^3 + 5x_1x_2 + 4x_3 - 2x_1^2x_3 - 18 \\ b = x_1 + x_2^3 + x_1x_2^2 + x_1x_3^2 - 22 \\ c = 8x_1^2 + 2x_2x_3 + 2x_2^2 + 3x_2^3 - 52 \end{cases}\]

Here \(x_i \in [-10, 10]\) for \(i \in [1, 2, 3]\). Global optimum: \(f(1, 2, 3) = 0\)

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = '\\left[ ab^2c + abc^2 + b^2 + (x_1 + x_2 - x_3)^2 \\right]^2'
latex_formula_bounds = 'x_i \\in [-10, 10] \\forall i \\in [1, 2, 3]'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(1, 2, 3) = 0'
linear = False
modality = False
name = 'Mishra 9 Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.m_func.Mishra10(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

\[f(x) = \left[ \lfloor x_1 \perp x_2 \rfloor - \lfloor x_1 \rfloor - \lfloor x_2 \rfloor \right]^2\]

Here \(x_i \in [-10, 10]\) for \(i \in [1, 2]\). Global optimum: \(f(2, 2) = 0\)

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = '\\left[ \\lfloor x_1 \\perp x_2 \\rfloor - \\lfloor x_1 \\rfloor - \\lfloor x_2 \\rfloor \\right]^2'
latex_formula_bounds = 'x_i \\in [-10, 10] \\forall i \\in [1, 2]'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(2, 2) = 0'
linear = False
modality = False
name = 'Mishra 10 Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.m_func.Mishra11(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

\[f(x) = \left [ \frac{1}{n} \sum_{i=1}^{n} \lvert x_i \rvert - \left(\prod_{i=1}^{n} \lvert x_i \rvert \right )^{\frac{1}{n}} \right]^2\]

Here \(x_i \in [-10, 10]\) for \(i \in [1, n]\). Global optimum: \(f(0) = 0\)

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = '\\left [ \\frac{1}{n} \\sum_{i=1}^{n} \\lvert x_i \\rvert - \\left(\\prod_{i=1}^{n} \\lvert x_i \\rvert \\right )^{\\frac{1}{n}} \\right]^2'
latex_formula_bounds = 'x_i \\in [-10, 10] \\forall i \\in [1, n]'
latex_formula_dimension = 'd = n'
latex_formula_global_optimum = 'f(0) = 0'
linear = False
modality = False
name = 'Mishra 11 Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.m_func.MultiModal(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015

\[f(x) = \left( \sum_{i=1}^n \lvert x_i \rvert \right) \left( \prod_{i=1}^n \lvert x_i \rvert \right)\]

Here \(x_i \in [-10, 10]\) for \(i \in [1, n]\). Global optimum: \(f(0) = 0\)

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = '\\left( \\sum_{i=1}^n \\lvert x_i \\rvert \\right) \\left( \\prod_{i=1}^n \\lvert x_i \\rvert \\right)'
latex_formula_bounds = 'x_i \\in [-10, 10] \\forall i \\in [1, n]'
latex_formula_dimension = 'd = n'
latex_formula_global_optimum = 'f(0) = 0'
linear = False
modality = False
name = 'MultiModal Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False

opfunu.name_based.n_func module

class opfunu.name_based.n_func.NeedleEye(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015

\[f_{\text{NeedleEye}}(x) = \begin{cases} 1 & \textrm{if }\hspace{5pt} \lvert x_i \rvert < eye \hspace{5pt} \forall i \\ \sum_{i=1}^n (100 + \lvert x_i \rvert) & \textrm{if } \hspace{5pt} \lvert x_i \rvert > eye \\ 0 & \textrm{otherwise} \end{cases}\]

Here \(x_i \in [-10, 10]\) for \(i = 1, 2,...,n\). Global optimum: \(f(x) = 1.0\)

continuous = False
convex = True
differentiable = False
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{NeedleEye}}(x) = \\begin{cases} 1 & \\textrm{if }\\hspace{5pt} \\lvert x_i \\rvert  <  eye \\hspace{5pt} \\forall i \\\\ \\sum_{i=1}^n (100 + \\lvert x_i \\rvert) & \\textrm{if } \\hspace{5pt}\\lvert x_i \\rvert > eye \\\\ 0 & \\textrm{otherwise}\\\\\\end{cases}'
latex_formula_bounds = 'x_i \\in [-10, 10, ..., 10]'
latex_formula_dimension = 'd = n'
latex_formula_global_optimum = 'f(0, 0, ...,0) = 1.0'
linear = False
modality = False
name = 'NeedleEye Function'
parametric = False
randomized_term = False
scalable = True
separable = False
unimodal = False
class opfunu.name_based.n_func.NewFunction01(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Mishra, S. Global Optimization by Differential Evolution and

Particle Swarm Methods: Evaluation on Some Benchmark Functions. Munich Personal RePEc Archive, 2006, 1005

\[f_{\text{NewFunction01}}(x) = \left | {\cos\left(\sqrt{\left|{x_{1}^{2} + x_{2}}\right|}\right)} \right |^{0.5} + (x_{1} + x_{2})/100\]

Here \(x_i \in [-10, 10]\) for \(i = 1, 2\). Global optimum: \(f(x) = -0.18459899925\)

continuous = False
convex = True
differentiable = False
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{NewFunction01}}(x) = \\left | {\\cos\\left(\\sqrt{\\left|{x_{1}^{2}+ x_{2}}\\right|}\\right)} \\right |^{0.5} + (x_{1} + x_{2})/100'
latex_formula_bounds = 'x_i \\in [-10, 10]'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f([-8.46669057, -9.99982177]) = -0.18459899925'
linear = False
modality = False
name = 'NewFunction01 Function'
parametric = False
randomized_term = False
scalable = True
separable = False
unimodal = False
class opfunu.name_based.n_func.NewFunction02(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Mishra, S. Global Optimization by Differential Evolution and

Particle Swarm Methods: Evaluation on Some Benchmark Functions. Munich Personal RePEc Archive, 2006, 1005

\[f_{\text{NewFunction02}}(x) = \left | {\sin\left(\sqrt{\lvert{x_{1}^{2} + x_{2}}\rvert}\right)} \right |^{0.5} + (x_{1} + x_{2})/100\]

Here \(x_i \in [-10, 10]\) for \(i = 1, 2\). Global optimum: \(f(x) = -0.19933159253\)

continuous = False
convex = True
differentiable = False
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{NewFunction02}}(x) = \\left | {\\sin\\left(\\sqrt{\\lvert{x_{1}^{2} + x_{2}}\\rvert}\\right)} \\right |^{0.5} + (x_{1} + x_{2})/100'
latex_formula_bounds = 'x_i \\in [-10, 10]'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f([-9.94103375, -9.99771235]) = -0.19933159253'
linear = False
modality = False
name = 'NewFunction02 Function'
parametric = False
randomized_term = False
scalable = True
separable = False
unimodal = False

opfunu.name_based.o_func module

class opfunu.name_based.o_func.OddSquare(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015

\[f_{\text{OddSquare}}(x) = -e^{-\frac{d}{2\pi}} \cos(\pi d) \left( 1 + \frac{0.02h}{d + 0.01} \right )\]

Where, in this exercise:

\[\begin{cases} d = n \cdot \smash{\displaystyle\max_{1 \leq i \leq n}} \left[ (x_i - b_i)^2 \right ] \\ h = \sum_{i=1}^{n} (x_i - b_i)^2 \end{cases}\]

And \(b = [1, 1.3, 0.8, -0.4, -1.3, 1.6, -0.2, -0.6, 0.5, 1.4, 1, 1.3, 0.8, -0.4, -1.3, 1.6, -0.2, -0.6, 0.5, 1.4]\)

Here \(x_i \in [-5 \pi, 5 \pi]\) for \(i = 1, ..., n\), with \(n \leq 20\). Global optimum: \(f(x) = -1.00846728102\)

continuous = False
convex = True
differentiable = False
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{OddSquare}}(x) = -e^{-\\frac{d}{2\\pi}} \\cos(\\pi d) \\left( 1 + \\frac{0.02h}{d + 0.01} \\right )'
latex_formula_bounds = 'x_i \\in [-5 \\pi, 5 \\pi]'
latex_formula_dimension = 'd = 20'
latex_formula_global_optimum = 'f(b) = -1.00846728102'
linear = False
modality = False
name = 'Odd Square Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False

opfunu.name_based.p_func module

class opfunu.name_based.p_func.Parsopoulos(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems

Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

\[f_{\text{Parsopoulos}}(x) = \cos(x_1)^2 + \sin(x_2)^2\]

with \(x_i \in [-5, 5]\) for \(i = 1, 2\).

Global optimum: This function has an infinite number of global minima in \(\mathbb{R}^2\), at points \(\left(k \frac{\pi}{2}, \lambda \pi \right)\), where \(k = \pm1, \pm3, ...\) and \(\lambda = 0, \pm1, \pm2, ...\)

In the given domain problem, function has 12 global minima all equal to zero.

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{Parsopoulos}}(x) = \\cos(x_1)^2 + \\sin(x_2)^2'
latex_formula_bounds = 'x_i \\in [-10, 10, ..., 10]'
latex_formula_dimension = 'd = n'
latex_formula_global_optimum = 'f(0, 0, ...,0) = 1.0'
linear = False
modality = False
name = 'Parsopoulos Function'
parametric = False
randomized_term = False
scalable = True
separable = True
unimodal = False

opfunu.name_based.q_func module

class opfunu.name_based.q_func.Qing(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems

Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

\[f_{\text{Qing}}(x) = \sum_{i=1}^{n} (x_i^2 - i)^2\]

Here, \(n\) represents the number of dimensions and \(x_i \in [-500, 500]\) for \(i = 1, ..., n\).

Global optimum: \(f(x) = 0\) for \(x_i = \pm \sqrt(i)\) for \(i = 1, ..., n\)

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{Qing}}(x) = \\sum_{i=1}^{n} (x_i^2 - i)^2'
latex_formula_bounds = 'x_i \\in [-10, 10, ..., 10]'
latex_formula_dimension = 'd = n'
latex_formula_global_optimum = 'f(0, 0, ...,0) = 1.0'
linear = False
modality = False
name = 'Qing Function'
parametric = False
randomized_term = False
scalable = True
separable = True
unimodal = False
class opfunu.name_based.q_func.Quadratic(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems

Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

\[f_{\text{Quadratic}}(x) = -3803.84 - 138.08x_1 - 232.92x_2 + 128.08x_1^2 + 203.64x_2^2 + 182.25x_1x_2\]

Here, \(n\) represents the number of dimensions and \(x_i \in [-10, 10]\) for \(i = 1, 2\).

Global optimum: \(f(x) = -3873.72418\) for \(x = [0.19388, 0.48513]\)

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{Quadratic}}(x) = -3803.84 - 138.08x_1 - 232.92x_2 + 128.08x_1^2+ 203.64x_2^2 + 182.25x_1x_2'
latex_formula_bounds = 'x_i \\in [-10, 10, ..., 10]'
latex_formula_dimension = 'd = n'
latex_formula_global_optimum = 'f(0, 0, ...,0) = 1.0'
linear = False
modality = False
name = 'Quadratic Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.q_func.Quartic(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems

Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

\[f_{\text{Quartic}}(x) = \]

Here, \(n\) represents the number of dimensions and \(x_i \in [-10, 10]\) for \(i = 1, 2\).

Global optimum: \(f(x) = -3873.72418\) for \(x = [0.19388, 0.48513]\)

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{Quartic}}(x) = '
latex_formula_bounds = 'x_i \\in [-10, 10, ..., 10]'
latex_formula_dimension = 'd = n'
latex_formula_global_optimum = 'f(0, 0, ...,0) = 1.0'
linear = False
modality = False
name = 'Quartic Function'
parametric = False
randomized_term = True
scalable = False
separable = False
unimodal = False
class opfunu.name_based.q_func.Quintic(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems

Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

\[f_{\text{Quintic}}(x) = \sum_{i=1}^{n} \left|{x_{i}^{5} - 3 x_{i}^{4} + 4 x_{i}^{3} + 2 x_{i}^{2} - 10 x_{i} - 4}\right|\]

Here, \(n\) represents the number of dimensions and \(x_i \in [-10, 10]\) for \(i = 1, ..., n\).

Global optimum: \(f(x_i) = 0\) for \(x_i = -1\) for \(i = 1, ..., n\)

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{Quintic}}(x) = \\sum_{i=1}^{n} \\left|{x_{i}^{5} - 3 x_{i}^{4}+ 4 x_{i}^{3} + 2 x_{i}^{2} - 10 x_{i} -4}\\right|'
latex_formula_bounds = 'x_i \\in [-10, 10, ..., 10]'
latex_formula_dimension = 'd = n'
latex_formula_global_optimum = 'f(0, 0, ...,0) = 1.0'
linear = False
modality = True
name = 'Quartic Function'
parametric = False
randomized_term = False
scalable = True
separable = True
unimodal = False

opfunu.name_based.r_func module

class opfunu.name_based.r_func.Rana(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems

Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

\[f_{\text{Rana}}(x) = \sum_{i=1}^{n} \left[x_{i} \sin\left(\sqrt{\lvert{x_{1} - x_{i} + 1}\rvert}\right) \cos\left(\sqrt{\lvert{x_{1} + x_{i} + 1}\rvert}\right) + \left(x_{1} + 1\right) \sin\left(\sqrt{\lvert{x_{1} + x_{i} + 1}\rvert}\right) \cos\left(\sqrt{\lvert{x_{1} - x_{i} + 1}\rvert}\right)\right]\]

Here, \(n\) represents the number of dimensions and \(x_i \in [-500.0, 500.0]\) for \(i = 1, ..., n\).

Global optimum: \(f(x_i) = -928.5478\) for \(x = [-300.3376, 500]\).

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{Rana}}(x) = '
latex_formula_bounds = 'x_i \\in [-10, 10, ..., 10]'
latex_formula_dimension = 'd = n'
latex_formula_global_optimum = 'f(0, 0, ...,0) = 1.0'
linear = False
modality = True
name = 'Qing Function'
parametric = False
randomized_term = False
scalable = True
separable = False
unimodal = False

opfunu.name_based.s_func module

class opfunu.name_based.s_func.Salomon(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems

Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

\[f_{\text{Salomon}}(x) = 1 - \cos \left (2 \pi \sqrt{\sum_{i=1}^{n} x_i^2} \right) + 0.1 \sqrt{\sum_{i=1}^n x_i^2}\]

Here, \(n\) represents the number of dimensions and \(x_i \in [-100, 100]\) for \(i = 1, ..., n\).

Global optimum: \(f(x) = 0\) for \(x_i = 0\) for \(i = 1, ..., n\)

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{Salomon}}(x) = 1 - \\cos \\left (2 \\pi \\sqrt{\\sum_{i=1}^{n} x_i^2} \\right) + 0.1 \\sqrt{\\sum_{i=1}^n x_i^2}'
latex_formula_bounds = 'x_i \\in [-10, 10, ..., 10]'
latex_formula_dimension = 'd = n'
latex_formula_global_optimum = 'f(0, 0, ...,0) = 1.0'
linear = False
modality = True
name = 'Qing Function'
parametric = False
randomized_term = False
scalable = True
separable = False
unimodal = False

opfunu.name_based.t_func module

class opfunu.name_based.t_func.TestTubeHolder(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems

Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

\[f_{\text{TestTubeHolder}}(x) = - 4 \left | {e^{\left|{\cos \left(\frac{1}{200} x_{1}^{2} + \frac{1}{200} x_{2}^{2}\right)}\right|} \sin\left(x_{1}\right) \cos\left(x_{2}\right)} \right|\]

with \(x_i \in [-10, 10]\) for \(i = 1, 2\).

Global optimum: \(f(x) = -10.872299901558\) for \(x= [-\pi/2, 0]\)

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{TestTubeHolder}}(x)='
latex_formula_bounds = 'x_i \\in [-10, 10, ..., 10]'
latex_formula_dimension = 'd = n'
latex_formula_global_optimum = 'f(0, 0, ...,0) = 1.0'
linear = False
modality = True
name = 'Qing Function'
parametric = False
randomized_term = False
scalable = False
separable = True
unimodal = False

opfunu.name_based.u_func module

class opfunu.name_based.u_func.Ursem01(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems

Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

\[f_{\text{Ursem01}}(x) = - \sin(2x_1 - 0.5 \pi) - 3 \cos(x_2) - 0.5 x_1\]

with \(x_1 \in [-2.5, 3]\) and \(x_2 \in [-2, 2]\).

Global optimum: \(f(x) = -4.81681406371\) for \(x = [1.69714, 0.0]\)

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{Ursem01}}(x) = - \\sin(2x_1 - 0.5 \\pi) - 3 \\cos(x_2) - 0.5 x_1'
latex_formula_bounds = 'x_i \\in [-10, 10, ..., 10]'
latex_formula_dimension = 'd = n'
latex_formula_global_optimum = 'f(0, 0, ...,0) = 1.0'
linear = False
modality = True
name = 'Qing Function'
parametric = False
randomized_term = False
scalable = False
separable = True
unimodal = False

opfunu.name_based.v_func module

class opfunu.name_based.v_func.VenterSobiezcczanskiSobieski(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems

Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

\[f(x) = x_1^2 - 100 \cos^2(x_1) - 100 \cos(x_1^2/30)+ x_2^2 - 100 \cos^2(x_2)- 100 \cos(x_2^2/30)\]

with \(x_i \in [-50, 50]\) for \(i = 1, 2\).

Global optimum: \(f(x) = -400\) for \(x = [0, 0]\)

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = x_1^2 - 100 \\cos^2(x_1) - 100 \\cos(x_1^2/30)+ x_2^2 - 100 \\cos^2(x_2)- 100 \\cos(x_2^2/30)'
latex_formula_bounds = 'x_i \\in [-10, 10, ..., 10]'
latex_formula_dimension = 'd = n'
latex_formula_global_optimum = 'f(0, 0, ...,0) = 1.0'
linear = False
modality = True
name = 'VenterSobiezcczanskiSobieski Function'
parametric = False
randomized_term = False
scalable = False
separable = True
unimodal = False

opfunu.name_based.w_func module

class opfunu.name_based.w_func.Watson(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems

Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

\[f(x) = \sum_{i=0}^{29} \left\{\sum_{j=0}^4 ((j + 1)a_i^j x_{j+1}) - \left[ \sum_{j=0}^5 a_i^j x_{j+1} \right ]^2 - 1 \right\}^2 + x_1^2\]

Where, in this exercise, \(a_i = i/29\). with \(x_i \in [-5, 5]\) for \(i = 1, ..., 6\).

Global optimum: \(f(x) = 0.002288\) for \(x = [-0.0158, 1.012, -0.2329, 1.260, -1.513, 0.9928]\)

continuous = True
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = \\sum_{i=0}^{29} \\left\\{\\sum_{j=0}^4 ((j + 1)a_i^j x_{j+1}) - \\left[ \\sum_{j=0}^5 a_i^j x_{j+1} \\right ]^2 - 1 \\right\\}^2 + x_1^2'
latex_formula_bounds = 'x_i \\in [-10, 10, ..., 10]'
latex_formula_dimension = 'd = n'
latex_formula_global_optimum = 'f(0, 0, ...,0) = 1.0'
linear = False
modality = True
name = 'Watson Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = True

opfunu.name_based.x_func module

class opfunu.name_based.x_func.XinSheYang01(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems

Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

\[f(x) = \sum_{i=1}^{n} \epsilon_i \lvert x_i \rvert^i\]

The variable \(\epsilon_i, (i = 1, ..., n)\) is a random variable uniformly distributed in \([0, 1]\).

Here, \(n\) represents the number of dimensions and \(x_i \in [-5, 5]\) for \(i = 1, ..., n\).

Global optimum: \(f(x) = 0\) for \(x_i = 0\) for \(i = 1, ..., n\)

continuous = True
convex = True
differentiable = False
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = \\sum_{i=1}^{n} \\epsilon_i \\lvert x_i \\rvert^i'
latex_formula_bounds = 'x_i \\in [-10, 10, ..., 10]'
latex_formula_dimension = 'd = n'
latex_formula_global_optimum = 'f(0, 0, ...,0) = 1.0'
linear = False
modality = True
name = 'Xin-She Yang 1 Function'
parametric = False
randomized_term = False
scalable = True
separable = True
unimodal = False

opfunu.name_based.y_func module

class opfunu.name_based.y_func.YaoLiu04(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Yao X., Liu Y. (1997) Fast evolution strategies. In: Angeline P.J., Reynolds R.G., McDonnell J.R., Eberhart R. (eds)

Evolutionary Programming VI. EP 1997. Lecture Notes in Computer Science, vol 1213. Springer, Berlin, Heidelberg

2

Mishra, S. Global Optimization by Differential Evolution and Particle Swarm Methods: Evaluation

on Some Benchmark Functions. Munich Personal RePEc Archive, 2006, 1005

\[f(x) = {\max}_i \left\{ \left | x_i \right | , 1 \leq i \leq n \right\}\]

Here, \(n\) represents the number of dimensions and \(x_i \in [-10, 10]\) for \(i = 1, ..., n\).

Global optimum: \(f(x) = 0\) for \(x_i = 0\) for \(i = 1, ..., n\)

continuous = True
convex = True
differentiable = False
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f(x) = {max}_i \\left\\{ \\left | x_i \\right | , 1 \\leq i \\leq n \\right\\}'
latex_formula_bounds = 'x_i \\in [-10, 10, ..., 10]'
latex_formula_dimension = 'd = n'
latex_formula_global_optimum = 'f(0, 0, ...,0) = 1.0'
linear = False
modality = False
name = 'Yao-Liu 4 Function'
parametric = False
randomized_term = False
scalable = True
separable = True
unimodal = False

opfunu.name_based.z_func module

class opfunu.name_based.z_func.Zacharov(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization

Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

\[f_{\text{Zacharov}}(x) = \sum_{i=1}^{n} x_i^2 + \left (\frac{1}{2}\sum_{i=1}^{n} i x_i \right )^2 + \left (\frac{1}{2} \sum_{i=1}^{n} i x_i \right )^4\]

Here \(x_i \in [-5, 10]\) for \(i = 1, ..., n\). Global optimum: \(f(x) = 0.0\)

continuous = False
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = '\\sum_{i=1}^{n} x_i^2 + \\left ( \\frac{1}{2}\\sum_{i=1}^{n} i x_i \\right )^2+ \\left ( \\frac{1}{2} \\sum_{i=1}^{n} i x_i \\right )^4'
latex_formula_bounds = 'x_i \\in [-5, 10]'
latex_formula_dimension = 'd = n'
latex_formula_global_optimum = 'f(0, 0,...,0) = 0'
linear = False
modality = False
name = 'Zacharov Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.z_func.ZeroSum(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015

\[f_{\text{ZeroSum}}(x) = \begin{cases} 0 & \textrm{if } \sum_{i=1}^n x_i = 0 \\ 1 + \left(10000 \left |\sum_{i=1}^n x_i \right| \right)^{0.5} & \textrm{otherwise} \end{cases}\]

Here \(x_i \in [-10, 10]\) for \(i = 1, ..., n\). Global optimum: \(f(x) = 0.0\) whenever \(\sum_{i=1}^n x_i = 0\)

continuous = False
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = '\\begin{cases} 0 & \\textrm{if} \\sum_{i=1}^n x_i = 0 \\\\ 1 + \\left(10000 \\left |\\sum_{i=1}^n x_i \\right| \\right)^{0.5} & \\textrm{otherwise} \\end{cases}'
latex_formula_bounds = 'x_i \\in [-10, 10]'
latex_formula_dimension = 'd = n'
latex_formula_global_optimum = 'f(x_best) = 0'
linear = False
modality = False
name = 'ZeroSum Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.z_func.Zettl(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015

\[f_{\text{Zettl}}(x) = \frac{1}{4} x_{1} + \left(x_{1}^{2} - 2 x_{1} + x_{2}^{2}\right)^{2}\]

Here \(x_i \in [-1, 5]\) for \(i = 1, 2\). Global optimum: \(f(x) = -0.0037912\)

continuous = False
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{Zettl}}(x) = \\frac{1}{4} x_{1} + \\left(x_{1}^{2} - 2 x_{1} + x_{2}^{2}\\right)^{2}'
latex_formula_bounds = 'x_i \\in [-0.029896, 0.0]'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f(x_best) = -0.0037912'
linear = False
modality = False
name = 'Zettl Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.z_func.Zimmerman(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015

\[f_{\text{Zimmerman}}(x) = \max \left[Zh1(x), Zp(Zh2(x))\textrm{sgn}(Zh2(x)), Zp(Zh3(x))\textrm{sgn}(Zh3(x)), Zp(-x_1)\textrm{sgn}(x_1), Zp(-x_2)\textrm{sgn}(x_2)\right]\]

\[\begin{cases} Zh1(x) = 9 - x_1 - x_2 \\ Zh2(x) = (x_1 - 3)^2 + (x_2 - 2)^2 \\ Zh3(x) = x_1x_2 - 14 \\ Zp(t) = 100(1 + t) \end{cases}\]

Where \(x\) is a vector and \(t\) is a scalar. Here, \(x_i \in [0, 100]\) for \(i = 1, 2\). Global optimum: \(f(x) = 0\) for \(x = [7, 2]\)

continuous = False
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = '\\max \\left[Zh1(x), Zp(Zh2(x))\\textrm{sgn}(Zh2(x)), Zp(Zh3(x))\\textrm{sgn}(Zh3(x)), Zp(-x_1)\\textrm{sgn}(x_1),Zp(-x_2)\\textrm{sgn}(x_2) \\right]'
latex_formula_bounds = 'x_i \\in [0, 100]'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f([7, 2]) = 0.'
linear = False
modality = False
name = 'Zimmerman Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = False
class opfunu.name_based.z_func.Zirilli(ndim=None, bounds=None)[source]

Bases: opfunu.benchmark.Benchmark

1

Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015

\[f_{\text{Zirilli}}(x) = 0.25x_1^4 - 0.5x_1^2 + 0.1x_1 + 0.5x_2^2\]
\[egin{cases} Zh1(x) = 9 - x_1 - x_2 \ Zh2(x) = (x_1 - 3)^2 + (x_2 - 2)^2 \ Zh3(x) = x_1x_2 - 14 \ Zp(t) = 100(1 + t) \end{cases}\]

Where \(x\) is a vector and \(t\) is a scalar. Here, \(x_i \in [-10, 10]\) for \(i = 1, 2\). Global optimum: \(f(x) = -0.3523\) for \(x = [-1.0465, 0]\)

continuous = False
convex = True
differentiable = True
evaluate(x, *args)[source]

Evaluation of the benchmark function.

Parameters

x (np.ndarray) – The candidate vector for evaluating the benchmark problem. Must have len(x) == self.ndim.

Returns

val – the evaluated benchmark function

Return type

float

latex_formula = 'f_{\\text{Zirilli}}(x) = 0.25x_1^4 - 0.5x_1^2 + 0.1x_1 + 0.5x_2^2'
latex_formula_bounds = 'x_i \\in [-10, 10]'
latex_formula_dimension = 'd = 2'
latex_formula_global_optimum = 'f([-1.0465, 0]) = -0.3523'
linear = False
modality = False
name = 'Zirilli Function'
parametric = False
randomized_term = False
scalable = False
separable = False
unimodal = True