diff --git a/.tools/envs/testenv-linux.yml b/.tools/envs/testenv-linux.yml
index 39cfd0b69..74817d3c0 100644
--- a/.tools/envs/testenv-linux.yml
+++ b/.tools/envs/testenv-linux.yml
@@ -28,12 +28,14 @@ dependencies:
   - jinja2  # dev, tests
   - annotated-types  # dev, tests
   - iminuit  # dev, tests
+  - cma  # dev, tests
   - pip:  # dev, tests, docs
-      - nevergrad  # dev, tests
       - DFO-LS>=1.5.3  # dev, tests
       - Py-BOBYQA  # dev, tests
       - fides==0.7.4  # dev, tests
+      - nevergrad  # dev, tests
       - kaleido>=1.0  # dev, tests
+      - bayes_optim  # dev, tests
       - pandas-stubs  # dev, tests
       - types-cffi  # dev, tests
       - types-openpyxl  # dev, tests
diff --git a/.tools/envs/testenv-numpy.yml b/.tools/envs/testenv-numpy.yml
index 015dd9b52..5d882153a 100644
--- a/.tools/envs/testenv-numpy.yml
+++ b/.tools/envs/testenv-numpy.yml
@@ -26,12 +26,14 @@ dependencies:
   - jinja2  # dev, tests
   - annotated-types  # dev, tests
   - iminuit  # dev, tests
+  - cma  # dev, tests
   - pip:  # dev, tests, docs
-      - nevergrad  # dev, tests
       - DFO-LS>=1.5.3  # dev, tests
       - Py-BOBYQA  # dev, tests
       - fides==0.7.4  # dev, tests
+      - nevergrad  # dev, tests
       - kaleido>=1.0  # dev, tests
+      - bayes_optim  # dev, tests
       - types-cffi  # dev, tests
       - types-openpyxl  # dev, tests
       - types-jinja2  # dev, tests
diff --git a/.tools/envs/testenv-others.yml b/.tools/envs/testenv-others.yml
index e300065fe..0a021d31c 100644
--- a/.tools/envs/testenv-others.yml
+++ b/.tools/envs/testenv-others.yml
@@ -26,12 +26,14 @@ dependencies:
   - jinja2  # dev, tests
   - annotated-types  # dev, tests
   - iminuit  # dev, tests
+  - cma  # dev, tests
   - pip:  # dev, tests, docs
-      - nevergrad  # dev, tests
       - DFO-LS>=1.5.3  # dev, tests
       - Py-BOBYQA  # dev, tests
       - fides==0.7.4  # dev, tests
+      - nevergrad  # dev, tests
       - kaleido>=1.0  # dev, tests
+      - bayes_optim  # dev, tests
       - pandas-stubs  # dev, tests
       - types-cffi  # dev, tests
       - types-openpyxl  # dev, tests
diff --git a/.tools/envs/testenv-pandas.yml b/.tools/envs/testenv-pandas.yml
index 9f8fc6d7d..04956719b 100644
--- a/.tools/envs/testenv-pandas.yml
+++ b/.tools/envs/testenv-pandas.yml
@@ -26,12 +26,14 @@ dependencies:
   - jinja2  # dev, tests
   - annotated-types  # dev, tests
   - iminuit  # dev, tests
+  - cma  # dev, tests
   - pip:  # dev, tests, docs
-      - nevergrad  # dev, tests
       - DFO-LS>=1.5.3  # dev, tests
       - Py-BOBYQA  # dev, tests
       - fides==0.7.4  # dev, tests
+      - nevergrad  # dev, tests
       - kaleido>=1.0  # dev, tests
+      - bayes_optim  # dev, tests
       - types-cffi  # dev, tests
       - types-openpyxl  # dev, tests
       - types-jinja2  # dev, tests
diff --git a/.tools/envs/testenv-plotly.yml b/.tools/envs/testenv-plotly.yml
index 27504174b..3552ee709 100644
--- a/.tools/envs/testenv-plotly.yml
+++ b/.tools/envs/testenv-plotly.yml
@@ -26,11 +26,13 @@ dependencies:
   - jinja2  # dev, tests
   - annotated-types  # dev, tests
   - iminuit  # dev, tests
+  - cma  # dev, tests
   - pip:  # dev, tests, docs
-      - nevergrad  # dev, tests
       - DFO-LS>=1.5.3  # dev, tests
       - Py-BOBYQA  # dev, tests
       - fides==0.7.4  # dev, tests
+      - nevergrad  # dev, tests
+      - bayes_optim  # dev, tests
       - pandas-stubs  # dev, tests
       - types-cffi  # dev, tests
       - types-openpyxl  # dev, tests
diff --git a/docs/source/algorithms.md b/docs/source/algorithms.md
index d9e43a004..854df8c35 100644
--- a/docs/source/algorithms.md
+++ b/docs/source/algorithms.md
@@ -3984,8 +3984,6 @@ iminuit).
       - Values greater than 1 specify the maximum number of restart attempts.  
 ```
 
-(nevergrad-algorithms)=
-
 ## Nevergrad Optimizers
 
 optimagic supports some algorithms from the
@@ -4001,46 +3999,590 @@ these optimizers, you need to have
 
         "nevergrad_pso"
 
-    Minimize a scalar function using the Particle Swarm Optimization (PSO) algorithm.
-
-    The Particle Swarm Optimization algorithm was originally proposed by
-    :cite:`Kennedy1995`. The implementation in Nevergrad is based on
-    :cite:`Zambrano2013`.
-
-    Particle Swarm Optimization (PSO) solves a problem by having a population of
-    candidate solutions, here dubbed particles, and moving these particles around in the
-    search-space according to simple mathematical formulae over the particle's position
-    and velocity. Each particle's movement is influenced by its local best known
-    position (termed "cognitive" component), but is also guided toward the best known
-    positions (termed "social" component) in the search-space, which are updated as
-    better positions are found by other particles. This is expected to move the swarm
-    toward the best solutions.
-
-    - **transform** (str): The transform to use to map from PSO optimization space to
-      R-space. Available options are:
-      - "arctan" (default)
-      - "identity"
-      - "gaussian"
-    - **population_size** (int): Population size of the particle swarm.
-    - **n_cores** (int): Number of cores to use.
-    - **seed** (int): Seed used by the internal random number generator.
-    - **stopping.maxfun** (int): Maximum number of function evaluations.
-    - **inertia** (float): Inertia weight. Denoted by :math:`\omega`.
-      Default is 0.7213475204444817. To prevent divergence, the value must be smaller
-      than 1. It controls the influence of the particle's previous velocity on its
-      movement.
-    - **cognitive** (float): Cognitive coefficient. Denoted by :math:`\phi_p`.
-      Default is 1.1931471805599454. Typical values range from 1.0 to 3.0. It controls
-      the influence of its own best known position on the particle's movement.
-    - **social** (float): Social coefficient. Denoted by :math:`\phi_g`.
-      Default is 1.1931471805599454. Typical values range from 1.0 to 3.0. It controls
-      the influence of the swarm's best known position on the particle's movement.
-    - **quasi_opp_init** (bool): Whether to use quasi-opposition initialization.
-      Default is False.
-    - **speed_quasi_opp_init** (bool): Whether to use quasi-opposition initialization
-      for speed. Default is False.
-    - **special_speed_quasi_opp_init** (bool): Whether to use special quasi-opposition
-      initialization for speed. Default is False.
+    Minimize a scalar function using the Particle Swarm Optimization algorithm.
+    
+    The Particle Swarm Optimization algorithm was originally proposed by :cite:`Kennedy1995`. The
+    implementation in Nevergrad is based on :cite:`Zambrano2013`.
+    
+    PSO solves an optimization problem by evolving a swarm of particles (candidate solutions) across the
+    search space. Each particle adjusts its position based on its own experience (cognitive component)
+    and the experiences of its neighbors or the swarm (social component), using velocity updates. The
+    algorithm iteratively guides the swarm toward promising regions of the search space.
+
+    - **transform** (str): The transform used to map from PSO optimization space to real space. Options:
+              - "arctan" (default)
+              - "identity"
+              - "gaussian"
+    - **population\_size** (int): The number of particles in the swarm.
+    - **n\_cores** (int): The number of CPU cores to use for parallel computation.
+    - **seed** (int, optional): Random seed for reproducibility.
+    - **stopping\_maxfun** (int, optional): Maximum number of function evaluations.
+    - **inertia** (float):
+      Inertia weight :math:`\omega`. Controls the influence of a particle's previous velocity. Must be
+      less than 1 to avoid divergence. Default is 0.7213475204444817.
+    - **cognitive** (float):
+      Cognitive coefficient :math:`\phi_p`. Controls the influence of a particle’s own best known
+      position. Typical values: 1.0 to 3.0. Default is 1.1931471805599454.
+    - **social** (float):
+      Social coefficient. Denoted by :math:`\phi_g`. Controls the influence of the swarm’s best known
+      position. Typical values: 1.0 to 3.0. Default is 1.1931471805599454.
+    - **quasi\_opp\_init** (bool): Whether to use quasi-opposition initialization. Default is False.
+    - **speed\_quasi\_opp\_init** (bool):
+      Whether to apply quasi-opposition initialization to speed. Default is False.
+    - **special\_speed\_quasi\_opp\_init** (bool):
+      Whether to use special quasi-opposition initialization for speed. Default is False.
+    - **sigma**:
+      Standard deviation for sampling initial population from N(0, σ²) in case bounds are not provided.
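+
+    For illustration, a minimal call through optimagic's ``minimize`` might look as follows (assuming
+    ``nevergrad`` is installed; the option names mirror the list above):
+
+    .. code-block:: python
+
+        import numpy as np
+        import optimagic as om
+
+        res = om.minimize(
+            fun=lambda x: np.sum(x**2),  # simple sphere objective
+            params=np.array([1.0, 2.0, 3.0]),
+            algorithm="nevergrad_pso",
+            bounds=om.Bounds(lower=np.full(3, -5.0), upper=np.full(3, 5.0)),
+            algo_options={"population_size": 40, "stopping_maxfun": 2_000, "seed": 0},
+        )
+        print(res.params)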
+```
+
+```{eval-rst}
+.. dropdown:: nevergrad_cmaes
+
+    .. code-block::
+
+        "nevergrad_cmaes"
+
+    Minimize a scalar function using the Covariance Matrix Adaptation Evolution Strategy (CMA-ES)
+    algorithm.
+    
+    The CMA-ES (Covariance Matrix Adaptation Evolution Strategy) is a state-of-the-art evolutionary
+    algorithm designed for difficult non-linear, non-convex, black-box optimization problems in
+    continuous domains. It is typically applied to unconstrained or bounded optimization problems with
+    dimensionality between 3 and 100. CMA-ES adapts a multivariate normal distribution to approximate
+    the shape of the objective function. It estimates a positive-definite covariance matrix, akin to the
+    inverse Hessian in convex-quadratic problems, but without requiring derivatives or their
+    approximation. The original paper can be accessed at `cma <https://cma-es.github.io/>`_. This
+    implementation is a Python wrapper around the original `pycma <https://cma-es.github.io/>`_ code.
+
+    - **scale**: Scale of the search.
+    - **elitist**:
+      Whether to switch to elitist mode (also known as (μ+λ)-CMA-ES). In elitist mode, the best point in
+      the population is always retained.
+    - **population\_size**: Population size.
+    - **diagonal**: Use the diagonal version of CMA, which is more efficient for high-dimensional problems.
+    - **high\_speed**: Use a metamodel for recommendation to speed up optimization.
+    - **fast\_cmaes**:
+      Use the fast CMA-ES implementation. Cannot be used with diagonal=True. Produces equivalent results
+      and is preferable for high dimensions or when objective function evaluations are fast.
+    - **random\_init**: If True, initialize the optimizer with random parameters.
+    - **n\_cores**: Number of cores to use for parallel function evaluation.
+    - **step\_size\_adaptive**:
+      Whether to adapt the step size. Can be a boolean or a string specifying the adaptation strategy.
+    - **CSA\_dampfac**: Damping factor for step size adaptation.
+    - **CMA\_dampsvec\_fade**: Damping rate for step size adaptation.
+    - **CSA\_squared**: Whether to use squared step sizes in updates.
+    - **CMA\_on**: Learning rate for the covariance matrix update.
+    - **CMA\_rankone**: Multiplier for the rank-one update learning rate of the covariance matrix.
+    - **CMA\_rankmu**: Multiplier for the rank-mu update learning rate of the covariance matrix.
+    - **CMA\_cmean**: Learning rate for the mean update.
+    - **CMA\_diagonal\_decoding**: Learning rate for the diagonal update.
+    - **num\_parents**: Number of parents (μ) for recombination.
+    - **CMA\_active**: Whether to use negative updates for the covariance matrix.
+    - **CMA\_mirrormethod**: Strategy for mirror sampling. Possible values are:
+              - 0: Unconditional mirroring
+              - 1: Selective mirroring
+              - 2: Selective mirroring with delay (default)
+    - **CMA\_const\_trace**: How to normalize the trace of the covariance matrix. Valid values are:
+              - False: No normalization
+              - True: Normalize to 1
+              - "arithm": Arithmetic mean normalization
+              - "geom": Geometric mean normalization
+              - "aeig": Arithmetic mean of eigenvalues
+              - "geig": Geometric mean of eigenvalues
+    - **CMA\_diagonal**:
+      Number of iterations to use diagonal covariance matrix before switching to full matrix. If False,
+      always use full matrix.
+    - **stopping\_maxfun**: Maximum number of function evaluations before termination.
+    - **stopping\_maxiter**: Maximum number of iterations before termination.
+    - **stopping\_timeout**: Maximum time in seconds before termination.
+    - **stopping\_cov\_mat\_cond**: Maximum condition number of the covariance matrix before termination.
+    - **convergence\_ftol\_abs**: Absolute tolerance on function value changes for convergence.
+    - **convergence\_ftol\_rel**: Relative tolerance on function value changes for convergence.
+    - **convergence\_xtol\_abs**: Absolute tolerance on parameter changes for convergence.
+    - **convergence\_iter\_noimprove**: Number of iterations without improvement before termination.
+    - **invariant\_path**: Whether evolution path (pc) should be invariant to transformations.
+    - **eval\_final\_mean**: Whether to evaluate the final mean solution.
+    - **seed**: Seed used by the internal random number generator for reproducibility.
+    - **sigma**:
+      Standard deviation for sampling initial population from N(0, σ²) in case bounds are not provided.
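+
+    A minimal usage sketch (assuming ``cma`` and ``nevergrad`` are installed; option names follow the
+    list above):
+
+    .. code-block:: python
+
+        import numpy as np
+        import optimagic as om
+
+        res = om.minimize(
+            fun=lambda x: np.sum((x - 1.0) ** 2),
+            params=np.zeros(5),
+            algorithm="nevergrad_cmaes",
+            bounds=om.Bounds(lower=np.full(5, -10.0), upper=np.full(5, 10.0)),
+            algo_options={"population_size": 20, "stopping_maxfun": 5_000, "seed": 0},
+        )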
+```
+
+```{eval-rst}
+.. dropdown:: nevergrad_oneplusone
+
+    .. code-block::
+
+        "nevergrad_oneplusone"
+
+    Minimize a scalar function using the One Plus One Evolutionary algorithm from Nevergrad.
+    
+    The One Plus One evolutionary algorithm iterates to find a set of parameters that minimizes the loss
+    function. It does this by perturbing, or mutating, the parameters from the last iteration (the
+    parent). If the new (child) parameters yield a better result, then the child becomes the new parent
+    whose parameters are perturbed, perhaps more aggressively. If the parent yields a better result, it
+    remains the parent and the next perturbation is less aggressive. Originally proposed by
+    :cite:`Rechenberg1973`. The implementation in Nevergrad is based on the one-fifth adaptation rule,
+    going back to :cite:`Schumer1968`.
+
+    - **noise\_handling**: Method for handling the noise (Default: `None`). Can be
+          - "random": A random point is reevaluated regularly using the one-fifth adaptation rule.
+          - "optimistic": The best optimistic point is reevaluated regularly, embracing optimism in the face of uncertainty.
+          - A float coefficient to tune the regularity of these reevaluations (default 0.05), e.g. with 0.05 each evaluation has a 5% chance (1 in 20) of being repeated, i.e. the same candidate solution is reevaluated to better estimate its performance.
+    - **n\_cores**: Number of cores to use.
+    - **stopping\_maxfun**: Maximum number of function evaluations.
+    - **mutation**: Type of mutation to apply (Default: `"gaussian"`). Available options are:
+          - "gaussian": Standard mutation by adding a Gaussian random variable (with progressive widening) to the best pessimistic point.
+          - "cauchy": Same as Gaussian but using a Cauchy distribution.
+          - "discrete": Mutates a randomly drawn variable (mutation occurs with probability 1/d in d dimensions, hence ~1 variable per mutation).
+          - "discreteBSO": Follows brainstorm optimization by gradually decreasing mutation rate from 1 to 1/d.
+          - "fastga": Fast Genetic Algorithm mutations from the current best.
+          - "doublefastga": Double-FastGA mutations from the current best :cite:`doerr2017`.
+          - "rls": Randomized Local Search — mutates one and only one variable.
+          - "portfolio": Random number of mutated bits, known as uniform mixing :cite:`dang2016`.
+          - "lengler": Mutation rate is a function of dimension and iteration index.
+          - "lengler{2|3|half|fourth}": Variants of the Lengler mutation rate adaptation.
+    - **sparse**: Whether to apply random mutations that set variables to zero. Default is `False`.
+    - **smoother**: Whether to suggest smooth mutations. Default is `False`.
+    - **annealing**:
+      Annealing schedule to apply to mutation amplitude or temperature-based control (Default: `"none"`).
+      Options are:
+          - "none": No annealing is applied.
+          - "Exp0.9": Exponential decay with rate 0.9.
+          - "Exp0.99": Exponential decay with rate 0.99.
+          - "Exp0.9Auto": Exponential decay with rate 0.9, auto-scaled based on problem horizon.
+          - "Lin100.0": Linear decay from 1 to 0 over 100 iterations.
+          - "Lin1.0": Linear decay from 1 to 0 over 1 iteration.
+          - "LinAuto": Linearly decaying annealing automatically scaled to the problem horizon.
+    - **super\_radii**:
+      Whether to apply extended radii beyond standard bounds for candidate generation, enabling broader
+      exploration. Default is `False`.
+    - **roulette\_size**:
+      Size of the roulette wheel used for selection in the evolutionary process. Affects the sampling
+      diversity from past candidates. (Default: `64`)
+    - **antismooth**:
+      Degree of anti-smoothing applied to prevent premature convergence in smooth landscapes. This alters
+      the landscape by penalizing overly smooth improvements. (Default: `4`)
+    - **crossover**: Whether to include a genetic crossover step every other iteration. Default is `False`.
+    - **crossover\_type**:
+      Method used for genetic crossover between individuals in the population. Available options (Default: `"none"`):
+          - "none": No crossover is applied.
+          - "rand": Randomized selection of crossover point.
+          - "max": Crossover at the point with maximum fitness gain.
+          - "min": Crossover at the point with minimum fitness gain.
+          - "onepoint": One-point crossover, splitting the genome at a single random point.
+          - "twopoint": Two-point crossover, splitting the genome at two points and exchanging the middle section.
+    - **tabu\_length**:
+      Length of the tabu list used to prevent revisiting recently evaluated candidates in local search
+      strategies. Helps in escaping local minima. (Default: `1000`)
+    - **rotation**:
+      Whether to apply rotational transformations to the search space, promoting invariance to
+      axis-aligned structures and enhancing search performance in rotated coordinate systems.
+      (Default: `False`)
+    - **seed**: Seed for the random number generator for reproducibility.
+    - **sigma**:
+      Standard deviation for sampling initial population from N(0, σ²) in case bounds are not provided.
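+
+    A minimal sketch showing how a non-default mutation could be selected (option names follow the
+    list above):
+
+    .. code-block:: python
+
+        import numpy as np
+        import optimagic as om
+
+        res = om.minimize(
+            fun=lambda x: np.sum(np.abs(x)),
+            params=np.array([2.0, -3.0]),
+            algorithm="nevergrad_oneplusone",
+            bounds=om.Bounds(lower=np.full(2, -5.0), upper=np.full(2, 5.0)),
+            # "cauchy" mutations take larger jumps than the default "gaussian"
+            algo_options={"mutation": "cauchy", "stopping_maxfun": 1_000},
+        )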
+```
+
+```{eval-rst}
+.. dropdown:: nevergrad_de
+
+    .. code-block::
+
+        "nevergrad_de"
+
+    Minimize a scalar function using the Differential Evolution optimizer from Nevergrad.
+    
+    Differential Evolution is typically used for continuous optimization. It uses differences between
+    points in the population for performing mutations in fruitful directions; it is therefore a kind of
+    covariance adaptation without any explicit covariance, making it very fast in high dimensions.
+
+    - **initialization**:
+      Algorithm/distribution used for initialization. Can be one of: "parametrization" (uses
+      parametrization's sample method), "LHS" (Latin Hypercube Sampling), "QR" (Quasi-Random), "QO"
+      (Quasi-Orthogonal), or "SO" (Sobol sequence).
+    - **scale**: Scale of random component of updates. Can be a float or a string.
+    - **recommendation**: Criterion for selecting the best point to recommend. Options: "pessimistic",
+      "optimistic", "mean", or "noisy".
+    - **crossover**: Crossover rate or strategy. Can be:
+              - float: Fixed crossover rate
+              - "dimension": 1/dimension
+              - "random": Random uniform rate per iteration
+              - "onepoint": One-point crossover
+              - "twopoints": Two-points crossover
+              - "rotated_twopoints": Rotated two-points crossover
+              - "parametrization": Use parametrization's recombine method
+    - **F1**: Differential weight #1 (scaling factor).
+    - **F2**: Differential weight #2 (scaling factor).
+    - **popsize**: Population size. Can be an integer or one of:
+              - "standard": max(num_workers, 30)
+              - "dimension": max(num_workers, 30, dimension + 1)
+              - "large": max(num_workers, 30, 7 * dimension)
+    - **high\_speed**: If True, uses a metamodel for recommendations to speed up optimization.
+    - **stopping\_maxfun**: Maximum number of function evaluations before termination.
+    - **n\_cores**: Number of cores to use for parallel function evaluation.
+    - **seed**: Seed for the random number generator for reproducibility.
+    - **sigma**:
+      Standard deviation for sampling initial population from N(0, σ²) in case bounds are not provided.
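+
+    A minimal sketch with a larger population and two-point crossover (option names follow the list
+    above):
+
+    .. code-block:: python
+
+        import numpy as np
+        import optimagic as om
+
+        res = om.minimize(
+            fun=lambda x: np.sum(x**2),
+            params=np.ones(10),
+            algorithm="nevergrad_de",
+            bounds=om.Bounds(lower=np.full(10, -5.0), upper=np.full(10, 5.0)),
+            algo_options={
+                "popsize": "dimension",
+                "crossover": "twopoints",
+                "stopping_maxfun": 5_000,
+            },
+        )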
+```
+
+```{eval-rst}
+.. dropdown:: nevergrad_bo
+
+    .. code-block::
+
+        "nevergrad_bo"
+
+    Minimize a scalar function using the Bayes Optim algorithm, i.e. the BO and PCA-BO algorithms from
+    the `bayes_optim <https://github.com/wangronin/Bayesian-Optimization>`_ package. PCA-BO (Principal
+    Component Analysis for Bayesian Optimization) is a dimensionality reduction technique for black-box
+    optimization. It applies PCA to the input space before performing Bayesian optimization, improving
+    efficiency in high dimensions by focusing on directions of greatest variance. This helps concentrate
+    search in informative subspaces and reduce sample complexity. See :cite:`bayesoptimimpl`.
+
+    - **init\_budget**: Number of initialization algorithm steps.
+    - **pca**: Whether to use the PCA transformation, defining PCA-BO rather than standard BO.
+    - **n\_components**:
+      Number of principal axes in feature space representing directions of maximum variance in the data.
+      Represents the percentage of explained variance (e.g., 0.95 means 95% variance retained).
+    - **prop\_doe\_factor**:
+      Percentage of the initial budget used for the design of experiments (DoE), potentially overriding
+      `init_budget`.
+    - **n\_cores**: Number of cores to use for parallel function evaluation.
+    - **seed**: Seed for the random number generator for reproducibility.
+    - **sigma**:
+      Standard deviation for sampling initial population from N(0, σ²) in case bounds are not provided.
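+
+    A minimal PCA-BO sketch (assuming ``bayes_optim`` is installed; option names follow the list above):
+
+    .. code-block:: python
+
+        import numpy as np
+        import optimagic as om
+
+        res = om.minimize(
+            fun=lambda x: np.sum(x**2),
+            params=np.ones(20),
+            algorithm="nevergrad_bo",
+            bounds=om.Bounds(lower=np.full(20, -5.0), upper=np.full(20, 5.0)),
+            # pca=True selects PCA-BO; n_components keeps 95% of the explained variance
+            algo_options={"pca": True, "n_components": 0.95, "stopping_maxfun": 200},
+        )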
+```
+
+```{eval-rst}
+.. dropdown:: nevergrad_emna
+
+    .. code-block::
+
+        "nevergrad_emna"
+
+    Minimize a scalar function using the Estimation of Multivariate Normal Algorithm.
+    
+    Estimation of Multivariate Normal Algorithm (EMNA), a distribution-based evolutionary algorithm that
+    models the search space using a multivariate Gaussian. EMNA learns the full covariance matrix of the
+    Gaussian sampling distribution, resulting in a cubic time complexity w.r.t. each sampling. It is
+    highly recommended to first attempt other more advanced optimization methods for LBO. See
+    :cite:`emnaimpl`. This algorithm is quite efficient in a parallel setting, i.e. when the population
+    size is large.
+
+    - **isotropic**:
+      If True, uses an isotropic (identity covariance) Gaussian. If False, uses a separable (diagonal
+      covariance) Gaussian for greater flexibility in anisotropic landscapes.
+    - **noise\_handling**:
+      If True, returns the best individual found. If False (recommended for noisy problems), returns the
+      average of the final population to reduce noise.
+    - **population\_size\_adaptation**:
+      If True, the population size is adjusted automatically based on the optimization landscape and noise
+      level.
+    - **initial\_popsize**: Initial population size. Default: 4 x dimension.
+    - **stopping\_maxfun**: Maximum number of function evaluations before termination.
+    - **n\_cores**: Number of cores to use for parallel function evaluation.
+    - **seed**: Seed for the random number generator for reproducibility.
+    - **sigma**:
+      Standard deviation for sampling initial population from N(0, σ²) in case bounds are not provided.
+```
+
+```{eval-rst}
+.. dropdown:: nevergrad_cga
+
+    .. code-block::
+
+        "nevergrad_cga"
+
+    Minimize a scalar function using the Compact Genetic Algorithm.
+    
+    The Compact Genetic Algorithm (cGA) is a memory-efficient genetic algorithm that represents the
+    population as a probability vector over gene values. It simulates the order-one behavior of a simple
+    GA with uniform crossover, updating probabilities instead of maintaining an explicit population. cGA
+    processes each gene independently and is well-suited for large or constrained environments. For
+    details see :cite:`cgaimpl`.
+
+    - **stopping\_maxfun**: Maximum number of function evaluations before termination.
+    - **n\_cores**: Number of cores to use for parallel function evaluation.
+    - **seed**: Seed for the random number generator for reproducibility.
+    - **sigma**:
+      Standard deviation for sampling initial population from N(0, σ²) in case bounds are not provided.
+```
+
+```{eval-rst}
+.. dropdown:: nevergrad_axp
+
+    .. code-block::
+
+        "nevergrad_axp"
+
+    Minimize a scalar function using the AXPlatform algorithm.
+    
+    AX is an adaptive experimentation platform from Facebook. Adaptive experimentation is the machine-learning
+    guided process of iteratively exploring a (possibly infinite) parameter space in order to identify
+    optimal configurations in a resource-efficient manner. It supports Bayesian optimization and bandit
+    strategies as exploration strategies. For full documentation visit `AX
+    <https://github.com/facebook/Ax>`_.
+
+    - **stopping\_maxfun**: Maximum number of function evaluations before termination.
+    - **n\_cores**: Number of cores to use for parallel function evaluation.
+    - **seed**: Seed for the random number generator for reproducibility.
+    - **sigma**:
+      Standard deviation for sampling initial population from N(0, σ²) in case bounds are not provided.
+```
+
+```{eval-rst}
+.. dropdown:: nevergrad_eda
+
+    .. code-block::
+
+        "nevergrad_eda"
+
+    Minimize a scalar function using the Estimation of Distribution Algorithm.
+    
+    Estimation of Distribution Algorithms (EDAs) optimize by building and sampling a probabilistic model
+    of promising solutions. Instead of using traditional variation operators like crossover or mutation,
+    EDAs update a distribution based on selected individuals and sample new candidates from it. This
+    allows efficient exploration of complex or noisy search spaces. In short, EDAs typically do not
+    directly evolve populations of search points but build probabilistic models of promising solutions
+    by repeatedly sampling and selecting points from the underlying search space. Refer to :cite:`edaimpl`.
+
+    - **stopping\_maxfun**: Maximum number of function evaluations before termination.
+    - **n\_cores**: Number of cores to use for parallel function evaluation.
+    - **seed**: Seed for the random number generator for reproducibility.
+    - **sigma**:
+      Standard deviation for sampling initial population from N(0, σ²) in case bounds are not provided.
+```
+
+```{eval-rst}
+.. dropdown:: nevergrad_tbpsa
+
+    .. code-block::
+
+        "nevergrad_tbpsa"
+
+    Minimize a scalar function using the Test-based population size adaptation algorithm.
+    
+    TBPSA adapts population size based on fitness trend detection using linear regression. If no
+    significant improvement is found (via hypothesis testing), the population size is increased to
+    improve robustness in noisy settings. This method performs best in many noisy optimization
+    problems, even in large dimensions. For more details, refer to :cite:`tbpsaimpl`.
+
+    - **noise\_handling**:
+      If True, returns the best individual seen so far. If False (recommended for noisy problems), returns
+      the average of the final population to reduce the effect of noise.
+    - **initial\_popsize**: Initial population size. If not specified, defaults to 4 x dimension.
+    - **stopping\_maxfun**: Maximum number of function evaluations before termination.
+    - **n\_cores**: Number of cores to use for parallel function evaluation.
+    - **seed**: Seed for the random number generator for reproducibility.
+    - **sigma**:
+      Standard deviation for sampling initial population from N(0, σ²) in case bounds are not provided.
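+
+    Since TBPSA targets noisy objectives, a sketch with a noisy function may be instructive (option
+    names follow the list above):
+
+    .. code-block:: python
+
+        import numpy as np
+        import optimagic as om
+
+        rng = np.random.default_rng(0)
+
+        def noisy_sphere(x):
+            return np.sum(x**2) + rng.normal(scale=0.1)
+
+        res = om.minimize(
+            fun=noisy_sphere,
+            params=np.ones(5),
+            algorithm="nevergrad_tbpsa",
+            bounds=om.Bounds(lower=np.full(5, -5.0), upper=np.full(5, 5.0)),
+            # noise_handling=False averages the final population, as recommended above
+            algo_options={"noise_handling": False, "stopping_maxfun": 3_000},
+        )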
+```
+
+```{eval-rst}
+.. dropdown:: nevergrad_randomsearch
+
+    .. code-block::
+
+        "nevergrad_randomsearch"
+
+    Minimize a scalar function using the Random Search algorithm.
+    
+    This is a one-shot optimization method that provides random suggestions.
+
+    - **middle\_point**:
+      Enforces that the first suggested point (ask) is the zero vector, i.e. we add (0, 0, ..., 0) as the
+      first point.
+    - **opposition\_mode**: Symmetrizes exploration with respect to the center.
+              - "opposite": enables full symmetry by always evaluating mirrored points.
+              - "quasi": applies randomized symmetry (less strict, more exploratory).
+              - None: disables any symmetric mirroring in the sampling process.
+    - **sampler**:
+              - "parametrization": uses the default sample() method of the parametrization, which samples uniformly within bounds or from a Gaussian.
+              - "gaussian": samples from a standard Gaussian distribution.
+              - "cauchy": uses a Cauchy distribution instead of Gaussian.
+    - **scale**: Scalar used to multiply suggested point values, or a string mode:
+              - "random": uses a randomized pattern for the scale.
+              - "auto": sigma = (1 + log(budget)) / (4 * log(dimension)); adjusts scale based on problem size.
+              - "autotune": sigma = sqrt(log(budget) / dimension); alternative auto-scaling based on budget and dimensionality.
+    - **recommendation\_rule**: Specifies how the final recommendation is chosen.
+              - "average_of_best": returns the average of top-performing candidates.
+              - "pessimistic": selects the pessimistic best (default);
+              - "average_of_exp_best": uses an exponential moving average of the best points.
+    - **stopping\_maxfun**: Maximum number of function evaluations before termination.
+    - **n\_cores**: Number of cores to use for parallel function evaluation.
+    - **seed**: Seed for the random number generator for reproducibility.
+    - **sigma**:
+      Standard deviation for sampling initial population from N(0, σ²) in case bounds are not provided.
+```
+
+```{eval-rst}
+.. dropdown:: nevergrad_samplingsearch
+
+    .. code-block::
+
+        "nevergrad_samplingsearch"
+
+    Minimize a scalar function using SamplingSearch.
+    
+    This is a one-shot optimization method; it improves on random search by ensuring more uniformity.
+
+    - **sampler**: Choice of the low-discrepancy sampler used for initial points.
+              - "Halton": deterministic, well-spaced sequences
+              - "Hammersley": similar to Halton but more uniform in low dimension
+              - "LHS": Latin Hypercube Sampling; ensures coverage along each axis
+    - **scrambled**:
+      If True, adds scrambling to the search; this is much better in high dimensions and rarely worse
+      than the original search.
+    - **middle\_point**:
+      If True, the first suggested point is the zero vector. Useful for initializing at the center of the
+      search space.
+    - **cauchy**:
+      If True, uses the inverse Cauchy distribution instead of Gaussian when projecting samples to
+      real-valued space (especially when no box bounds exist).
+    - **scale**: A float multiplier or "random".
+              - float: directly scales all generated points
+              - "random": uses a randomized scaling pattern for increased diversity
+    - **rescaled**: If True or a specific mode, rescales the sampling pattern.
+              - Ensures coverage of boundaries and may apply adaptive scaling
+              - Useful when original scale is too narrow or biased
+    - **recommendation\_rule**: How the final recommendation is chosen.
+              - "average_of_best": mean of the best-performing points
+              - "pessimistic": selects the point with best worst-case value (default)
+    - **stopping\_maxfun**: Maximum number of function evaluations before termination.
+    - **n\_cores**: Number of cores to use for parallel function evaluation.
+    - **seed**: Seed for the random number generator for reproducibility.
+    - **sigma**:
+      Standard deviation for sampling initial population from N(0, σ²) in case bounds are not provided.
+
+    .. note::
+        - Halton is a low-quality sampling method when the dimension is high; it is usually better to use Halton with scrambling.
+        - When the budget is known in advance, it is also better to replace Halton by Hammersley.
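+
+    A minimal sketch following the notes above, using Hammersley with scrambling (option names follow
+    the list above):
+
+    .. code-block:: python
+
+        import numpy as np
+        import optimagic as om
+
+        res = om.minimize(
+            fun=lambda x: np.sum(x**2),
+            params=np.ones(4),
+            algorithm="nevergrad_samplingsearch",
+            bounds=om.Bounds(lower=np.full(4, -2.0), upper=np.full(4, 2.0)),
+            algo_options={"sampler": "Hammersley", "scrambled": True, "stopping_maxfun": 500},
+        )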
+```
+
+```{eval-rst}
+.. dropdown:: nevergrad_NGOpt
+
+    .. code-block::
+
+        "nevergrad_NGOpt"
+
+    Minimize a scalar function using a Meta Optimizer from Nevergrad. Each meta optimizer combines
+    multiple optimizers to solve a problem.
+
+    - **optimizer**: One of
+              - NGOpt
+              - NGOpt4
+              - NGOpt8
+              - NGOpt10
+              - NGOpt12
+              - NGOpt13
+              - NGOpt14
+              - NGOpt15
+              - NGOpt16
+              - NGOpt21
+              - NGOpt36
+              - NGOpt38
+              - NGOpt39
+              - NGOptRW
+              - NGOptF
+              - NGOptF2
+              - NGOptF3
+              - NGOptF5
+              - NgIoh2
+              - NgIoh3
+              - NgIoh4
+              - NgIoh5
+              - NgIoh6
+              - NgIoh7
+              - NgIoh8
+              - NgIoh9
+              - NgIoh10
+              - NgIoh11
+              - NgIoh12
+              - NgIoh13
+              - NgIoh14
+              - NgIoh15
+              - NgIoh16
+              - NgIoh17
+              - NgIoh18
+              - NgIoh19
+              - NgIoh20
+              - NgIoh21
+              - NgIoh12b
+              - NgIoh13b
+              - NgIoh14b
+              - NgIoh15b
+              - NgIohRW2
+              - NgIohTuned
+              - NgDS
+              - NgDS2
+              - NGDSRW
+              - NGO
+              - CSEC
+              - CSEC10
+              - CSEC11
+              - Wiz
+    - **stopping\_maxfun**: Maximum number of function evaluations before termination.
+    - **n\_cores**: Number of cores to use for parallel function evaluation.
+    - **seed**: Seed for the random number generator for reproducibility.
+    - **sigma**:
+      Standard deviation for sampling initial population from N(0, σ²) in case bounds are not provided.
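+
+    A minimal sketch selecting one of the meta optimizers listed above (the chosen name is purely
+    illustrative):
+
+    .. code-block:: python
+
+        import numpy as np
+        import optimagic as om
+
+        res = om.minimize(
+            fun=lambda x: np.sum(x**2),
+            params=np.ones(3),
+            algorithm="nevergrad_NGOpt",
+            bounds=om.Bounds(lower=np.full(3, -5.0), upper=np.full(3, 5.0)),
+            # "NGOpt39" is one of the optimizer names from the list above
+            algo_options={"optimizer": "NGOpt39", "stopping_maxfun": 1_000},
+        )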
+```
+
+```{eval-rst}
+.. dropdown:: nevergrad_meta
+
+    .. code-block::
+
+        "nevergrad_meta"
+
+    Minimize a scalar function using a Meta Optimizer from Nevergrad. It utilizes a combination of local
+    and global optimizers to find the best solution. Local optimizers like BFGS are wrappers over scipy
+    implementations. Each meta optimizer combines multiple optimizers to solve a problem.
+
+    - **optimizer**: One of
+              - MultiBFGSPlus
+              - LogMultiBFGSPlus
+              - SqrtMultiBFGSPlus
+              - MultiCobylaPlus
+              - MultiSQPPlus
+              - BFGSCMAPlus
+              - LogBFGSCMAPlus
+              - SqrtBFGSCMAPlus
+              - SQPCMAPlus
+              - LogSQPCMAPlus
+              - SqrtSQPCMAPlus
+              - MultiBFGS
+              - LogMultiBFGS
+              - SqrtMultiBFGS
+              - MultiCobyla
+              - ForceMultiCobyla
+              - MultiSQP
+              - BFGSCMA
+              - LogBFGSCMA
+              - SqrtBFGSCMA
+              - SQPCMA
+              - LogSQPCMA
+              - SqrtSQPCMA
+              - FSQPCMA
+              - F2SQPCMA
+              - F3SQPCMA
+              - MultiDiscrete
+              - CMandAS2
+              - CMandAS3
+              - MetaCMA
+              - CMA
+              - PCEDA
+              - MPCEDA
+              - MEDA
+              - NoisyBandit
+              - SPSA
+              - Shiwa
+              - Carola3
+    - **stopping\_maxfun**: Maximum number of function evaluations before termination.
+    - **n\_cores**: Number of cores to use for parallel function evaluation.
+    - **seed**: Seed for the random number generator for reproducibility.
+    - **sigma**:
+      Standard deviation for sampling initial population from N(0, σ²) in case bounds are not provided.
 ```
 
 ## References
diff --git a/docs/source/how_to/how_to_algorithm_selection.ipynb b/docs/source/how_to/how_to_algorithm_selection.ipynb
index 0dfc58307..c7078de83 100644
--- a/docs/source/how_to/how_to_algorithm_selection.ipynb
+++ b/docs/source/how_to/how_to_algorithm_selection.ipynb
@@ -345,7 +345,7 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 3 (ipykernel)",
+   "display_name": "optimagic-docs",
    "language": "python",
    "name": "python3"
   },
@@ -359,7 +359,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.10.15"
+   "version": "3.10.16"
   }
  },
  "nbformat": 4,
diff --git a/docs/source/how_to/how_to_bounds.ipynb b/docs/source/how_to/how_to_bounds.ipynb
index b87a15be2..636b088e2 100644
--- a/docs/source/how_to/how_to_bounds.ipynb
+++ b/docs/source/how_to/how_to_bounds.ipynb
@@ -240,7 +240,7 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 3 (ipykernel)",
+   "display_name": "optimagic-docs",
    "language": "python",
    "name": "python3"
   },
@@ -254,7 +254,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.10.14"
+   "version": "3.10.16"
   }
  },
  "nbformat": 4,
diff --git a/docs/source/refs.bib b/docs/source/refs.bib
index 6a52fc279..49ce545d1 100644
--- a/docs/source/refs.bib
+++ b/docs/source/refs.bib
@@ -906,6 +906,17 @@ @article{JAMES1975343
 author = {F. James and M. Roos}
 }
 
+
+@misc{Hansen2023,
+title={The CMA Evolution Strategy: A Tutorial}, 
+author={Nikolaus Hansen},
+year={2023},
+eprint={1604.00772},
+archivePrefix={arXiv},
+primaryClass={cs.LG},
+url={https://arxiv.org/abs/1604.00772}, 
+}
+
 @InProceedings{Kennedy1995,
   author={Kennedy, J. and Eberhart, R.},
   booktitle={Proceedings of ICNN'95 - International Conference on Neural Networks}, 
@@ -927,4 +938,107 @@ @InProceedings{Zambrano2013
   doi = {10.1109/CEC.2013.6557848},
 }
 
+@inbook{randomsearch2010,
+author = {Zabinsky, Zelda},
+year = {2010},
+month = {06},
+pages = {},
+title = {Random Search Algorithms},
+isbn = {9780470400531},
+doi = {10.1002/9780470400531.eorms0704}
+}
+
+@INPROCEEDINGS{spsaimpl,
+  author={Rastogi, Pushpendre and Zhu, Jingyi and Spall, James C.},
+  booktitle={2016 Annual Conference on Information Science and Systems (CISS)}, 
+  title={Efficient implementation of enhanced adaptive simultaneous perturbation algorithms}, 
+  year={2016},
+  volume={},
+  number={},
+  pages={298-303},
+  keywords={Estimation;Algorithm design and analysis;Adaptive Estimation;Simultaneous Perturbation Stochastic Approximation (SPSA);Woodbury Matrix Identity},
+  doi={10.1109/CISS.2016.7460518}}
+
+@inproceedings{tbpsaimpl,
+author = {Hellwig, Michael and Beyer, Hans-Georg},
+year = {2016},
+month = {09},
+pages = {},
+title = {Evolution under Strong Noise: A Self-Adaptive Evolution Strategy Can Reach the Lower Performance Bound - the pcCMSA-ES},
+volume = {9921},
+isbn = {9783319458229},
+doi = {10.1007/978-3-319-45823-6_3}
+}
+
+@ARTICLE{cgaimpl,
+  author={Harik, G.R. and Lobo, F.G. and Goldberg, D.E.},
+  journal={IEEE Transactions on Evolutionary Computation}, 
+  title={The compact genetic algorithm}, 
+  year={1999},
+  volume={3},
+  number={4},
+  pages={287-297},
+  keywords={Genetic algorithms;Algorithm design and analysis;Laboratories;Computer simulation;Genetic engineering;Probability distribution;Computational modeling;History;Convergence;Mathematical model},
+  doi={10.1109/4235.797971}}
+
+@inproceedings{bayesoptimimpl,
+author = {Raponi, Elena and Wang, Hao and Bujny, Mariusz and Boria, Simonetta and Doerr, Carola},
+title = {High Dimensional Bayesian Optimization Assisted by Principal Component Analysis},
+year = {2020},
+isbn = {978-3-030-58111-4},
+publisher = {Springer-Verlag},
+address = {Berlin, Heidelberg},
+url = {https://doi.org/10.1007/978-3-030-58112-1_12},
+doi = {10.1007/978-3-030-58112-1_12},
+abstract = {Bayesian Optimization (BO) is a surrogate-assisted global optimization technique that has been successfully applied in various fields, e.g., automated machine learning and design optimization. Built upon a so-called infill-criterion and Gaussian Process regression (GPR), the BO technique suffers from a substantial computational complexity and hampered convergence rate as the dimension of the search spaces increases. Scaling up BO for high-dimensional optimization problems remains a challenging task.In this paper, we propose to tackle the scalability of BO by hybridizing it with a Principal Component Analysis (PCA), resulting in a novel PCA-assisted BO (PCA-BO) algorithm. Specifically, the PCA procedure learns a linear transformation from all the evaluated points during the run and selects dimensions in the transformed space according to the variability of evaluated points. We then construct the GPR model, and the infill-criterion in the space spanned by the selected dimensions.We assess the performance of our PCA-BO in terms of the empirical convergence rate and CPU time on multi-modal problems from the COCO benchmark framework. The experimental results show that PCA-BO can effectively reduce the CPU time incurred on high-dimensional problems, and maintains the convergence rate on problems with an adequate global structure. PCA-BO therefore provides a satisfactory trade-off between the convergence rate and computational efficiency opening new ways to benefit from the strength of BO approaches in high dimensional numerical optimization.},
+booktitle = {Parallel Problem Solving from Nature – PPSN XVI: 16th International Conference, PPSN 2020, Leiden, The Netherlands, September 5-9, 2020, Proceedings, Part I},
+pages = {169–183},
+numpages = {15},
+keywords = {Dimensionality reduction, Principal Component Analysis, Black-box optimization, Bayesian optimization},
+location = {Leiden, The Netherlands}
+}
+
+@book{Rechenberg1973,
+  author = {Rechenberg, Ingo},
+  title = {Evolutionsstrategie: Optimierung technischer Systeme nach Prinzipien der biologischen Evolution},
+  publisher = {Frommann-Holzboog Verlag},
+  year = {1973},
+  url = {https://gwern.net/doc/reinforcement-learning/exploration/1973-rechenberg.pdf},
+  address = {Stuttgart},
+  note = {[Evolution Strategy: Optimization of Technical Systems According to the Principles of Biological Evolution]}
+}
+
+@article{Schumer1968,
+  author={Schumer, M. and Steiglitz, K.},
+  journal={IEEE Transactions on Automatic Control}, 
+  title={Adaptive step size random search}, 
+  year={1968},
+  volume={13},
+  number={3},
+  pages={270-276},
+  keywords={Minimization methods;Gradient methods;Search methods;Adaptive control;Communication systems;Q measurement;Cost function;Newton method;Military computing},
+  doi={10.1109/TAC.1968.1098903}
+}
+
+@misc{edaimpl,
+      title={Theory of Estimation-of-Distribution Algorithms}, 
+      author={Martin S. Krejca and Carsten Witt},
+      year={2018},
+      eprint={1806.05392},
+      archivePrefix={arXiv},
+      primaryClass={cs.NE},
+      url={https://arxiv.org/abs/1806.05392}, 
+}
+
+@book{emnaimpl,
+author = {Larranaga, Pedro and Lozano, Jose},
+year = {2002},
+month = {01},
+pages = {},
+title = {Estimation of Distribution Algorithms: A New Tool for Evolutionary Computation},
+isbn = {9781461356042},
+journal = {Genetic algorithms and evolutionary computation ; 2},
+doi = {10.1007/978-1-4615-1539-5}
+}
+
 @Comment{jabref-meta: databaseType:bibtex;}
diff --git a/environment.yml b/environment.yml
index 0764ef7af..1ea4e3b1b 100644
--- a/environment.yml
+++ b/environment.yml
@@ -38,13 +38,15 @@ dependencies:
   - furo  # dev, docs
   - annotated-types  # dev, tests
   - iminuit  # dev, tests
+  - cma  # dev, tests
   - pip:  # dev, tests, docs
-      - nevergrad  # dev, tests
       - DFO-LS>=1.5.3  # dev, tests
       - Py-BOBYQA  # dev, tests
       - fides==0.7.4  # dev, tests
+      - nevergrad  # dev, tests
       - kaleido>=1.0  # dev, tests
       - pre-commit>=4  # dev
+      - bayes_optim  # dev, tests
       - -e .  # dev
       # type stubs
       - pandas-stubs  # dev, tests
diff --git a/pyproject.toml b/pyproject.toml
index 133b181cf..2f99b3772 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -16,7 +16,6 @@ dependencies = [
     "sqlalchemy>=1.3",
     "annotated-types",
     "typing-extensions",
-    "iminuit",
 ]
 dynamic = ["version"]
 keywords = [
diff --git a/src/optimagic/algorithms.py b/src/optimagic/algorithms.py
index 588514e95..54272cd7a 100644
--- a/src/optimagic/algorithms.py
+++ b/src/optimagic/algorithms.py
@@ -18,7 +18,21 @@
 from optimagic.optimizers.ipopt import Ipopt
 from optimagic.optimizers.nag_optimizers import NagDFOLS, NagPyBOBYQA
 from optimagic.optimizers.neldermead import NelderMeadParallel
-from optimagic.optimizers.nevergrad_optimizers import NevergradPSO
+from optimagic.optimizers.nevergrad_optimizers import (
+    NevergradBayesOptim,
+    NevergradCGA,
+    NevergradCMAES,
+    NevergradDifferentialEvolution,
+    NevergradEDA,
+    NevergradEMNA,
+    NevergradMeta,
+    NevergradNGOpt,
+    NevergradOnePlusOne,
+    NevergradPSO,
+    NevergradRandomSearch,
+    NevergradSamplingSearch,
+    NevergradTBPSA,
+)
 from optimagic.optimizers.nlopt_optimizers import (
     NloptBOBYQA,
     NloptCCSAQ,
@@ -172,7 +186,19 @@ def Scalar(
 
 @dataclass(frozen=True)
 class BoundedGlobalGradientFreeParallelScalarAlgorithms(AlgoSelection):
+    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
+    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
+    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
+    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
+    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
+    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
+    nevergrad_NGOpt: Type[NevergradNGOpt] = NevergradNGOpt
+    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
+    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
+    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
+    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
     scipy_brute: Type[ScipyBrute] = ScipyBrute
@@ -366,7 +392,19 @@ def Scalar(self) -> BoundedGlobalGradientFreeNonlinearConstrainedScalarAlgorithm
 
 @dataclass(frozen=True)
 class BoundedGlobalGradientFreeScalarAlgorithms(AlgoSelection):
+    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
+    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
+    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
+    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
+    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
+    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
+    nevergrad_NGOpt: Type[NevergradNGOpt] = NevergradNGOpt
+    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
+    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
+    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
+    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
     nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
     nlopt_direct: Type[NloptDirect] = NloptDirect
     nlopt_esch: Type[NloptESCH] = NloptESCH
@@ -406,7 +444,19 @@ def Parallel(self) -> BoundedGlobalGradientFreeParallelScalarAlgorithms:
 
 @dataclass(frozen=True)
 class BoundedGlobalGradientFreeParallelAlgorithms(AlgoSelection):
+    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
+    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
+    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
+    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
+    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
+    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
+    nevergrad_NGOpt: Type[NevergradNGOpt] = NevergradNGOpt
+    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
+    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
+    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
+    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
     scipy_brute: Type[ScipyBrute] = ScipyBrute
@@ -462,7 +512,19 @@ def Scalar(self) -> GlobalGradientFreeNonlinearConstrainedParallelScalarAlgorith
 
 @dataclass(frozen=True)
 class GlobalGradientFreeParallelScalarAlgorithms(AlgoSelection):
+    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
+    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
+    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
+    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
+    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
+    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
+    nevergrad_NGOpt: Type[NevergradNGOpt] = NevergradNGOpt
+    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
+    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
+    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
+    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
     scipy_brute: Type[ScipyBrute] = ScipyBrute
@@ -610,7 +672,19 @@ def Scalar(self) -> BoundedGradientFreeNonlinearConstrainedParallelScalarAlgorit
 
 @dataclass(frozen=True)
 class BoundedGradientFreeParallelScalarAlgorithms(AlgoSelection):
+    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
+    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
+    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
+    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
+    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
+    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
+    nevergrad_NGOpt: Type[NevergradNGOpt] = NevergradNGOpt
+    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
+    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
+    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
+    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
     scipy_brute: Type[ScipyBrute] = ScipyBrute
@@ -705,7 +779,19 @@ def Scalar(self) -> BoundedGlobalNonlinearConstrainedParallelScalarAlgorithms:
 
 @dataclass(frozen=True)
 class BoundedGlobalParallelScalarAlgorithms(AlgoSelection):
+    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
+    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
+    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
+    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
+    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
+    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
+    nevergrad_NGOpt: Type[NevergradNGOpt] = NevergradNGOpt
+    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
+    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
+    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
+    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
     scipy_brute: Type[ScipyBrute] = ScipyBrute
@@ -1032,7 +1118,19 @@ def Local(self) -> GradientBasedLocalNonlinearConstrainedScalarAlgorithms:
 
 @dataclass(frozen=True)
 class BoundedGlobalGradientFreeAlgorithms(AlgoSelection):
+    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
+    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
+    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
+    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
+    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
+    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
+    nevergrad_NGOpt: Type[NevergradNGOpt] = NevergradNGOpt
+    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
+    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
+    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
+    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
     nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
     nlopt_direct: Type[NloptDirect] = NloptDirect
     nlopt_esch: Type[NloptESCH] = NloptESCH
@@ -1096,7 +1194,19 @@ def Scalar(self) -> GlobalGradientFreeNonlinearConstrainedScalarAlgorithms:
 
 @dataclass(frozen=True)
 class GlobalGradientFreeScalarAlgorithms(AlgoSelection):
+    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
+    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
+    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
+    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
+    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
+    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
+    nevergrad_NGOpt: Type[NevergradNGOpt] = NevergradNGOpt
+    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
+    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
+    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
+    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
     nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
     nlopt_direct: Type[NloptDirect] = NloptDirect
     nlopt_esch: Type[NloptESCH] = NloptESCH
@@ -1140,7 +1250,19 @@ def Parallel(self) -> GlobalGradientFreeParallelScalarAlgorithms:
 
 @dataclass(frozen=True)
 class GlobalGradientFreeParallelAlgorithms(AlgoSelection):
+    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
+    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
+    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
+    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
+    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
+    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
+    nevergrad_NGOpt: Type[NevergradNGOpt] = NevergradNGOpt
+    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
+    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
+    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
+    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
     scipy_brute: Type[ScipyBrute] = ScipyBrute
@@ -1306,7 +1428,19 @@ def Scalar(self) -> BoundedGradientFreeNonlinearConstrainedScalarAlgorithms:
 @dataclass(frozen=True)
 class BoundedGradientFreeScalarAlgorithms(AlgoSelection):
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
+    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
+    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
+    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
+    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
+    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
+    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
+    nevergrad_NGOpt: Type[NevergradNGOpt] = NevergradNGOpt
+    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
+    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
+    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
+    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
     nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
     nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
     nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
@@ -1378,7 +1512,19 @@ def Parallel(self) -> BoundedGradientFreeLeastSquaresParallelAlgorithms:
 
 @dataclass(frozen=True)
 class BoundedGradientFreeParallelAlgorithms(AlgoSelection):
+    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
+    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
+    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
+    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
+    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
+    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
+    nevergrad_NGOpt: Type[NevergradNGOpt] = NevergradNGOpt
+    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
+    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
+    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
+    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
     pounders: Type[Pounders] = Pounders
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
@@ -1460,7 +1606,19 @@ def Scalar(self) -> GradientFreeNonlinearConstrainedParallelScalarAlgorithms:
 @dataclass(frozen=True)
 class GradientFreeParallelScalarAlgorithms(AlgoSelection):
     neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
+    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
+    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
+    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
+    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
+    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
+    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
+    nevergrad_NGOpt: Type[NevergradNGOpt] = NevergradNGOpt
+    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
+    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
+    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
+    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
     scipy_brute: Type[ScipyBrute] = ScipyBrute
@@ -1529,7 +1687,19 @@ def Scalar(self) -> BoundedGlobalNonlinearConstrainedScalarAlgorithms:
 
 @dataclass(frozen=True)
 class BoundedGlobalScalarAlgorithms(AlgoSelection):
+    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
+    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
+    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
+    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
+    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
+    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
+    nevergrad_NGOpt: Type[NevergradNGOpt] = NevergradNGOpt
+    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
+    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
+    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
+    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
     nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
     nlopt_direct: Type[NloptDirect] = NloptDirect
     nlopt_esch: Type[NloptESCH] = NloptESCH
@@ -1578,7 +1748,19 @@ def Parallel(self) -> BoundedGlobalParallelScalarAlgorithms:
 
 @dataclass(frozen=True)
 class BoundedGlobalParallelAlgorithms(AlgoSelection):
+    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
+    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
+    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
+    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
+    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
+    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
+    nevergrad_NGOpt: Type[NevergradNGOpt] = NevergradNGOpt
+    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
+    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
+    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
+    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
     scipy_brute: Type[ScipyBrute] = ScipyBrute
@@ -1647,7 +1829,19 @@ def Scalar(self) -> GlobalNonlinearConstrainedParallelScalarAlgorithms:
 
 @dataclass(frozen=True)
 class GlobalParallelScalarAlgorithms(AlgoSelection):
+    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
+    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
+    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
+    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
+    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
+    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
+    nevergrad_NGOpt: Type[NevergradNGOpt] = NevergradNGOpt
+    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
+    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
+    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
+    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
     scipy_brute: Type[ScipyBrute] = ScipyBrute
@@ -1882,7 +2076,19 @@ def Scalar(self) -> BoundedNonlinearConstrainedParallelScalarAlgorithms:
 
 @dataclass(frozen=True)
 class BoundedParallelScalarAlgorithms(AlgoSelection):
+    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
+    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
+    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
+    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
+    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
+    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
+    nevergrad_NGOpt: Type[NevergradNGOpt] = NevergradNGOpt
+    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
+    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
+    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
+    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
     scipy_brute: Type[ScipyBrute] = ScipyBrute
@@ -2141,7 +2347,19 @@ def Local(self) -> GradientBasedLikelihoodLocalAlgorithms:
 
 @dataclass(frozen=True)
 class GlobalGradientFreeAlgorithms(AlgoSelection):
+    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
+    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
+    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
+    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
+    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
+    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
+    nevergrad_NGOpt: Type[NevergradNGOpt] = NevergradNGOpt
+    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
+    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
+    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
+    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
     nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
     nlopt_direct: Type[NloptDirect] = NloptDirect
     nlopt_esch: Type[NloptESCH] = NloptESCH
@@ -2229,7 +2447,19 @@ def Scalar(self) -> GradientFreeLocalScalarAlgorithms:
 class BoundedGradientFreeAlgorithms(AlgoSelection):
     nag_dfols: Type[NagDFOLS] = NagDFOLS
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
+    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
+    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
+    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
+    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
+    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
+    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
+    nevergrad_NGOpt: Type[NevergradNGOpt] = NevergradNGOpt
+    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
+    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
+    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
+    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
     nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
     nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
     nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
@@ -2326,7 +2556,19 @@ def Scalar(self) -> GradientFreeNonlinearConstrainedScalarAlgorithms:
 class GradientFreeScalarAlgorithms(AlgoSelection):
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
     neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
+    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
+    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
+    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
+    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
+    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
+    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
+    nevergrad_NGOpt: Type[NevergradNGOpt] = NevergradNGOpt
+    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
+    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
+    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
+    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
     nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
     nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
     nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
@@ -2407,7 +2649,19 @@ def Parallel(self) -> GradientFreeLeastSquaresParallelAlgorithms:
 @dataclass(frozen=True)
 class GradientFreeParallelAlgorithms(AlgoSelection):
     neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
+    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
+    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
+    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
+    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
+    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
+    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
+    nevergrad_NGOpt: Type[NevergradNGOpt] = NevergradNGOpt
+    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
+    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
+    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
+    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
     pounders: Type[Pounders] = Pounders
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
@@ -2447,7 +2701,19 @@ def Scalar(self) -> GradientFreeParallelScalarAlgorithms:
 
 @dataclass(frozen=True)
 class BoundedGlobalAlgorithms(AlgoSelection):
+    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
+    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
+    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
+    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
+    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
+    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
+    nevergrad_NGOpt: Type[NevergradNGOpt] = NevergradNGOpt
+    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
+    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
+    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
+    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
     nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
     nlopt_direct: Type[NloptDirect] = NloptDirect
     nlopt_esch: Type[NloptESCH] = NloptESCH
@@ -2529,7 +2795,19 @@ def Scalar(self) -> GlobalNonlinearConstrainedScalarAlgorithms:
 
 @dataclass(frozen=True)
 class GlobalScalarAlgorithms(AlgoSelection):
+    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
+    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
+    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
+    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
+    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
+    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
+    nevergrad_NGOpt: Type[NevergradNGOpt] = NevergradNGOpt
+    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
+    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
+    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
+    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
     nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
     nlopt_direct: Type[NloptDirect] = NloptDirect
     nlopt_esch: Type[NloptESCH] = NloptESCH
@@ -2582,7 +2860,19 @@ def Parallel(self) -> GlobalParallelScalarAlgorithms:
 
 @dataclass(frozen=True)
 class GlobalParallelAlgorithms(AlgoSelection):
+    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
+    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
+    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
+    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
+    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
+    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
+    nevergrad_NGOpt: Type[NevergradNGOpt] = NevergradNGOpt
+    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
+    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
+    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
+    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
     scipy_brute: Type[ScipyBrute] = ScipyBrute
@@ -2847,7 +3137,19 @@ class BoundedScalarAlgorithms(AlgoSelection):
     iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
     ipopt: Type[Ipopt] = Ipopt
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
+    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
+    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
+    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
+    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
+    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
+    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
+    nevergrad_NGOpt: Type[NevergradNGOpt] = NevergradNGOpt
+    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
+    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
+    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
+    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
     nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
     nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ
     nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
@@ -2948,7 +3250,19 @@ def Parallel(self) -> BoundedLeastSquaresParallelAlgorithms:
 
 @dataclass(frozen=True)
 class BoundedParallelAlgorithms(AlgoSelection):
+    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
+    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
+    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
+    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
+    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
+    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
+    nevergrad_NGOpt: Type[NevergradNGOpt] = NevergradNGOpt
+    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
+    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
+    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
+    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
     pounders: Type[Pounders] = Pounders
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
@@ -3050,7 +3364,19 @@ def Scalar(self) -> NonlinearConstrainedParallelScalarAlgorithms:
 @dataclass(frozen=True)
 class ParallelScalarAlgorithms(AlgoSelection):
     neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
+    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
+    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
+    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
+    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
+    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
+    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
+    nevergrad_NGOpt: Type[NevergradNGOpt] = NevergradNGOpt
+    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
+    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
+    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
+    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
     scipy_brute: Type[ScipyBrute] = ScipyBrute
@@ -3158,7 +3484,19 @@ class GradientFreeAlgorithms(AlgoSelection):
     nag_dfols: Type[NagDFOLS] = NagDFOLS
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
     neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
+    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
+    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
+    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
+    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
+    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
+    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
+    nevergrad_NGOpt: Type[NevergradNGOpt] = NevergradNGOpt
+    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
+    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
+    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
+    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
     nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
     nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
     nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
@@ -3229,7 +3567,19 @@ def Scalar(self) -> GradientFreeScalarAlgorithms:
 
 @dataclass(frozen=True)
 class GlobalAlgorithms(AlgoSelection):
+    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
+    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
+    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
+    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
+    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
+    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
+    nevergrad_NGOpt: Type[NevergradNGOpt] = NevergradNGOpt
+    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
+    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
+    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
+    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
     nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
     nlopt_direct: Type[NloptDirect] = NloptDirect
     nlopt_esch: Type[NloptESCH] = NloptESCH
@@ -3363,7 +3713,19 @@ class BoundedAlgorithms(AlgoSelection):
     ipopt: Type[Ipopt] = Ipopt
     nag_dfols: Type[NagDFOLS] = NagDFOLS
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
+    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
+    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
+    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
+    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
+    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
+    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
+    nevergrad_NGOpt: Type[NevergradNGOpt] = NevergradNGOpt
+    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
+    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
+    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
+    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
     nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
     nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ
     nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
@@ -3500,7 +3862,19 @@ class ScalarAlgorithms(AlgoSelection):
     ipopt: Type[Ipopt] = Ipopt
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
     neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
+    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
+    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
+    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
+    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
+    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
+    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
+    nevergrad_NGOpt: Type[NevergradNGOpt] = NevergradNGOpt
+    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
+    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
+    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
+    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
     nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
     nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ
     nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
@@ -3629,7 +4003,19 @@ def Local(self) -> LikelihoodLocalAlgorithms:
 @dataclass(frozen=True)
 class ParallelAlgorithms(AlgoSelection):
     neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
+    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
+    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
+    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
+    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
+    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
+    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
+    nevergrad_NGOpt: Type[NevergradNGOpt] = NevergradNGOpt
+    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
+    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
+    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
+    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
     pounders: Type[Pounders] = Pounders
     pygmo_gaco: Type[PygmoGaco] = PygmoGaco
     pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
@@ -3678,7 +4064,19 @@ class Algorithms(AlgoSelection):
     nag_dfols: Type[NagDFOLS] = NagDFOLS
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
     neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
+    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
+    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
+    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
+    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
+    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
+    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
+    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
+    nevergrad_NGOpt: Type[NevergradNGOpt] = NevergradNGOpt
+    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
     nevergrad_pso: Type[NevergradPSO] = NevergradPSO
+    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
+    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
+    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
     nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
     nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ
     nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
diff --git a/src/optimagic/optimizers/nevergrad_optimizers.py b/src/optimagic/optimizers/nevergrad_optimizers.py
index 10003a0b7..669ae0d40 100644
--- a/src/optimagic/optimizers/nevergrad_optimizers.py
+++ b/src/optimagic/optimizers/nevergrad_optimizers.py
@@ -1,8 +1,8 @@
-"""Implement nevergrad optimizers."""
+"""Implement optimizers from the nevergrad package."""
 
 import math
 from dataclasses import dataclass
-from typing import Literal
+from typing import Any, Literal
 
 import numpy as np
 from numpy.typing import NDArray
@@ -10,17 +10,37 @@
 from optimagic import mark
 from optimagic.config import IS_NEVERGRAD_INSTALLED
 from optimagic.exceptions import NotInstalledError
-from optimagic.optimization.algo_options import STOPPING_MAXFUN_GLOBAL
+from optimagic.optimization.algo_options import (
+    CONVERGENCE_FTOL_ABS,
+    CONVERGENCE_FTOL_REL,
+    CONVERGENCE_XTOL_ABS,
+    STOPPING_MAXFUN_GLOBAL,
+    STOPPING_MAXITER,
+)
 from optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult
 from optimagic.optimization.internal_optimization_problem import (
     InternalOptimizationProblem,
 )
-from optimagic.typing import AggregationLevel, PositiveInt
+from optimagic.typing import (
+    AggregationLevel,
+    NonNegativeFloat,
+    NonNegativeInt,
+    PositiveFloat,
+    PositiveInt,
+)
 
 if IS_NEVERGRAD_INSTALLED:
     import nevergrad as ng
 
 
+NEVERGRAD_NOT_INSTALLED_ERROR = (
+    "This optimizer requires the 'nevergrad' package to be installed. "
+    "You can install it with `pip install nevergrad`. "
+    "Visit https://facebookresearch.github.io/nevergrad/getting_started.html "
+    "for more detailed installation instructions."
+)
+
+
 @mark.minimizer(
     name="nevergrad_pso",
     solver_type=AggregationLevel.SCALAR,
@@ -40,35 +60,22 @@ class NevergradPSO(Algorithm):
     population_size: int | None = None
     n_cores: int = 1
     seed: int | None = None
-    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL * 2  # imprecise algorithm
+    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL
     inertia: float = 0.5 / math.log(2.0)
     cognitive: float = 0.5 + math.log(2.0)
     social: float = 0.5 + math.log(2.0)
     quasi_opp_init: bool = False
     speed_quasi_opp_init: bool = False
     special_speed_quasi_opp_init: bool = False
+    sigma: float | None = None
 
     def _solve_internal_problem(
         self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
     ) -> InternalOptimizeResult:
         if not IS_NEVERGRAD_INSTALLED:
-            raise NotInstalledError(
-                "The nevergrad_pso optimizer requires the 'nevergrad' package to be "
-                "installed. You can install it with `pip install nevergrad`. "
-                "Visit https://facebookresearch.github.io/nevergrad/getting_started.html"
-                " for more detailed installation instructions."
-            )
+            raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR)
 
-        instrum = ng.p.Instrumentation(
-            ng.p.Array(
-                shape=x0.shape, lower=problem.bounds.lower, upper=problem.bounds.upper
-            )
-        )
-
-        if self.seed is not None:
-            instrum.random_state.seed(self.seed)
-
-        optimizer = ng.optimizers.ConfPSO(
+        configured_optimizer = ng.optimizers.ConfPSO(
             transform=self.transform,
             popsize=self.population_size,
             omega=self.inertia,
@@ -77,34 +84,946 @@ def _solve_internal_problem(
             qo=self.quasi_opp_init,
             sqo=self.speed_quasi_opp_init,
             so=self.special_speed_quasi_opp_init,
-        )(
-            parametrization=instrum,
-            budget=self.stopping_maxfun,
-            num_workers=self.n_cores,
-        )
-
-        while optimizer.num_ask < self.stopping_maxfun:
-            x_list = [
-                optimizer.ask()
-                for _ in range(
-                    min(self.n_cores, self.stopping_maxfun - optimizer.num_ask)
-                )
-            ]
-            losses = problem.batch_fun(
-                [x.value[0][0] for x in x_list], n_cores=self.n_cores
-            )
+        )
+
+        res = _nevergrad_internal(
+            problem=problem,
+            x0=x0,
+            configured_optimizer=configured_optimizer,
+            stopping_maxfun=self.stopping_maxfun,
+            n_cores=self.n_cores,
+            seed=self.seed,
+            sigma=self.sigma,
+            nonlinear_constraints=problem.nonlinear_constraints,
+        )
+
+        return res
+
+
+@mark.minimizer(
+    name="nevergrad_cmaes",
+    solver_type=AggregationLevel.SCALAR,
+    is_available=IS_NEVERGRAD_INSTALLED,
+    is_global=True,
+    needs_jac=False,
+    needs_hess=False,
+    supports_parallelism=True,
+    supports_bounds=True,
+    supports_linear_constraints=False,
+    supports_nonlinear_constraints=False,
+    disable_history=False,
+)
+@dataclass(frozen=True)
+class NevergradCMAES(Algorithm):
+    scale: NonNegativeFloat = 1.0
+    elitist: bool = False
+    population_size: int | None = None
+    diagonal: bool = False
+    high_speed: bool = False
+    fast_cmaes: bool = False
+    random_init: bool = False
+    n_cores: PositiveInt = 1
+    step_size_adaptive: bool | str = True
+    CSA_dampfac: PositiveFloat = 1.0
+    CMA_dampsvec_fade: PositiveFloat = 0.1
+    CSA_squared: bool = False
+    CMA_on: float = 1.0
+    CMA_rankone: float = 1.0
+    CMA_rankmu: float = 1.0
+    CMA_cmean: float = 1.0
+    CMA_diagonal_decoding: float = 0.0
+    num_parents: int | None = None
+    CMA_active: bool = True
+    CMA_mirrormethod: Literal[0, 1, 2] = 2
+    CMA_const_trace: bool | Literal["arithm", "geom", "aeig", "geig"] = False
+    CMA_diagonal: int | bool = False
+    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL
+    stopping_maxiter: PositiveInt = STOPPING_MAXITER
+    stopping_maxtime: PositiveFloat = float("inf")
+    stopping_cov_mat_cond: NonNegativeFloat = 1e14
+    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_FTOL_ABS
+    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL
+    convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_XTOL_ABS
+    convergence_iter_noimprove: PositiveInt | None = None
+    invariant_path: bool = False
+    eval_final_mean: bool = True
+    seed: int | None = None
+    sigma: float | None = None
+
+    def _solve_internal_problem(
+        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
+    ) -> InternalOptimizeResult:
+        if not IS_NEVERGRAD_INSTALLED:
+            raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR)
+
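+        # Options forwarded to the underlying cma package via ParametrizedCMA's
+        # `inopts` argument (see the call below).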
+        cma_options = {
+            "AdaptSigma": self.step_size_adaptive,
+            "CSA_dampfac": self.CSA_dampfac,
+            "CMA_dampsvec_fade": self.CMA_dampsvec_fade,
+            "CSA_squared": self.CSA_squared,
+            "CSA_invariant_path": self.invariant_path,
+            "CMA_on": self.CMA_on,
+            "CMA_rankone": self.CMA_rankone,
+            "CMA_rankmu": self.CMA_rankmu,
+            "CMA_cmean": self.CMA_cmean,
+            "CMA_diagonal_decoding": self.CMA_diagonal_decoding,
+            "CMA_mu": self.num_parents,
+            "CMA_active": self.CMA_active,
+            "CMA_mirrormethod": self.CMA_mirrormethod,
+            "CMA_const_trace": self.CMA_const_trace,
+            "CMA_diagonal": self.CMA_diagonal,
+            "maxfevals": self.stopping_maxfun,
+            "maxiter": self.stopping_maxiter,
+            "timeout": self.stopping_maxtime,
+            "tolconditioncov": self.stopping_cov_mat_cond,
+            "tolfun": self.convergence_ftol_abs,
+            "tolfunrel": self.convergence_ftol_rel,
+            "tolx": self.convergence_xtol_abs,
+            "tolstagnation": self.convergence_iter_noimprove,
+            "eval_final_mean": self.eval_final_mean,
+        }
+
+        configured_optimizer = ng.optimizers.ParametrizedCMA(
+            scale=self.scale,
+            popsize=self.population_size,
+            elitist=self.elitist,
+            diagonal=self.diagonal,
+            high_speed=self.high_speed,
+            fcmaes=self.fast_cmaes,
+            inopts=cma_options,
+        )
+
+        res = _nevergrad_internal(
+            problem=problem,
+            x0=x0,
+            configured_optimizer=configured_optimizer,
+            stopping_maxfun=self.stopping_maxfun,
+            n_cores=self.n_cores,
+            seed=self.seed,
+            sigma=self.sigma,
+            nonlinear_constraints=problem.nonlinear_constraints,
+        )
+        return res
+
+
+@mark.minimizer(
+    name="nevergrad_oneplusone",
+    solver_type=AggregationLevel.SCALAR,
+    is_available=IS_NEVERGRAD_INSTALLED,
+    is_global=True,
+    needs_jac=False,
+    needs_hess=False,
+    supports_parallelism=True,
+    supports_bounds=True,
+    supports_linear_constraints=False,
+    supports_nonlinear_constraints=False,
+    disable_history=False,
+)
+@dataclass(frozen=True)
+class NevergradOnePlusOne(Algorithm):
+    noise_handling: (
+        Literal["random", "optimistic"]
+        | tuple[Literal["random", "optimistic"], float]
+        | None
+    ) = None
+    mutation: Literal[
+        "gaussian",
+        "cauchy",
+        "discrete",
+        "fastga",
+        "rls",
+        "doublefastga",
+        "adaptive",
+        "coordinatewise_adaptive",
+        "portfolio",
+        "discreteBSO",
+        "lengler",
+        "lengler2",
+        "lengler3",
+        "lenglerhalf",
+        "lenglerfourth",
+        "doerr",
+        "lognormal",
+        "xlognormal",
+        "xsmalllognormal",
+        "tinylognormal",
+        "smalllognormal",
+        "biglognormal",
+        "hugelognormal",
+    ] = "gaussian"
+    annealing: (
+        Literal[
+            "none", "Exp0.9", "Exp0.99", "Exp0.9Auto", "Lin100.0", "Lin1.0", "LinAuto"
+        ]
+        | None
+    ) = None
+    sparse: bool = False
+    super_radii: bool = False
+    smoother: bool = False
+    roulette_size: PositiveInt = 64
+    antismooth: NonNegativeInt = 4
+    crossover: bool = False
+    crossover_type: (
+        Literal["none", "rand", "max", "min", "onepoint", "twopoint"] | None
+    ) = None
+    tabu_length: NonNegativeInt = 1000
+    rotation: bool = False
+    seed: int | None = None
+    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL
+    n_cores: PositiveInt = 1
+    sigma: float | None = None
+
+    def _solve_internal_problem(
+        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
+    ) -> InternalOptimizeResult:
+        if not IS_NEVERGRAD_INSTALLED:
+            raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR)
+
+        configured_optimizer = ng.optimizers.ParametrizedOnePlusOne(
+            noise_handling=self.noise_handling,
+            mutation=self.mutation,
+            crossover=self.crossover,
+            rotation=self.rotation,
+            annealing=self.annealing or "none",
+            sparse=self.sparse,
+            smoother=self.smoother,
+            super_radii=self.super_radii,
+            roulette_size=self.roulette_size,
+            antismooth=self.antismooth,
+            crossover_type=self.crossover_type or "none",
+        )
+
+        res = _nevergrad_internal(
+            problem=problem,
+            x0=x0,
+            configured_optimizer=configured_optimizer,
+            stopping_maxfun=self.stopping_maxfun,
+            n_cores=self.n_cores,
+            seed=self.seed,
+            sigma=self.sigma,
+            nonlinear_constraints=problem.nonlinear_constraints,
+        )
+
+        return res
+
+
+@mark.minimizer(
+    name="nevergrad_de",
+    solver_type=AggregationLevel.SCALAR,
+    is_available=IS_NEVERGRAD_INSTALLED,
+    is_global=True,
+    needs_jac=False,
+    needs_hess=False,
+    supports_parallelism=True,
+    supports_bounds=True,
+    supports_linear_constraints=False,
+    supports_nonlinear_constraints=False,
+    disable_history=False,
+)
+@dataclass(frozen=True)
+class NevergradDifferentialEvolution(Algorithm):
+    initialization: Literal["parametrization", "LHS", "QR", "QO", "SO"] = (
+        "parametrization"
+    )
+    scale: float | str = 1.0
+    recommendation: Literal["pessimistic", "optimistic", "mean", "noisy"] = (
+        "pessimistic"
+    )
+    crossover: (
+        float
+        | Literal[
+            "dimension",
+            "random",
+            "onepoint",
+            "twopoints",
+            "rotated_twopoints",
+            "parametrization",
+        ]
+    ) = 0.5
+    F1: PositiveFloat = 0.8
+    F2: PositiveFloat = 0.8
+    population_size: int | Literal["standard", "dimension", "large"] = "standard"
+    high_speed: bool = False
+    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL
+    n_cores: PositiveInt = 1
+    seed: int | None = None
+    sigma: float | None = None
+
+    def _solve_internal_problem(
+        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
+    ) -> InternalOptimizeResult:
+        if not IS_NEVERGRAD_INSTALLED:
+            raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR)
+
+        configured_optimizer = ng.optimizers.DifferentialEvolution(
+            scale=self.scale,
+            recommendation=self.recommendation,
+            crossover=self.crossover,
+            F1=self.F1,
+            F2=self.F2,
+            popsize=self.population_size,
+            high_speed=self.high_speed,
+        )
+
+        res = _nevergrad_internal(
+            problem=problem,
+            x0=x0,
+            configured_optimizer=configured_optimizer,
+            stopping_maxfun=self.stopping_maxfun,
+            n_cores=self.n_cores,
+            seed=self.seed,
+            sigma=self.sigma,
+            nonlinear_constraints=problem.nonlinear_constraints,
+        )
+        return res
+
+
+@mark.minimizer(
+    name="nevergrad_bo",
+    solver_type=AggregationLevel.SCALAR,
+    is_available=IS_NEVERGRAD_INSTALLED,
+    is_global=True,
+    needs_jac=False,
+    needs_hess=False,
+    supports_parallelism=True,
+    supports_bounds=True,
+    supports_linear_constraints=False,
+    supports_nonlinear_constraints=False,
+    disable_history=False,
+)
+@dataclass(frozen=True)
+class NevergradBayesOptim(Algorithm):
+    init_budget: int | None = None
+    pca: bool = False
+    n_components: NonNegativeFloat = 0.95
+    prop_doe_factor: NonNegativeFloat | None = 1
+    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL
+    n_cores: PositiveInt = 1
+    seed: int | None = None
+    sigma: float | None = None
+
+    def _solve_internal_problem(
+        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
+    ) -> InternalOptimizeResult:
+        if not IS_NEVERGRAD_INSTALLED:
+            raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR)
+
+        configured_optimizer = ng.optimizers.BayesOptim(
+            init_budget=self.init_budget,
+            pca=self.pca,
+            n_components=self.n_components,
+            prop_doe_factor=self.prop_doe_factor,
+        )
+
+        res = _nevergrad_internal(
+            problem=problem,
+            x0=x0,
+            configured_optimizer=configured_optimizer,
+            stopping_maxfun=self.stopping_maxfun,
+            n_cores=self.n_cores,
+            seed=self.seed,
+            sigma=self.sigma,
+            nonlinear_constraints=problem.nonlinear_constraints,
+        )
+        return res
+
+
+@mark.minimizer(
+    name="nevergrad_emna",
+    solver_type=AggregationLevel.SCALAR,
+    is_available=IS_NEVERGRAD_INSTALLED,
+    is_global=True,
+    needs_jac=False,
+    needs_hess=False,
+    supports_parallelism=True,
+    supports_bounds=True,
+    supports_linear_constraints=False,
+    supports_nonlinear_constraints=False,
+    disable_history=False,
+)
+@dataclass(frozen=True)
+class NevergradEMNA(Algorithm):
+    isotropic: bool = True
+    noise_handling: bool = True
+    population_size_adaptation: bool = False
+    initial_popsize: int | None = None
+    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL
+    n_cores: PositiveInt = 1
+    seed: int | None = None
+    sigma: float | None = None
+
+    def _solve_internal_problem(
+        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
+    ) -> InternalOptimizeResult:
+        if not IS_NEVERGRAD_INSTALLED:
+            raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR)
+
+        configured_optimizer = ng.optimizers.EMNA(
+            isotropic=self.isotropic,
+            naive=self.noise_handling,
+            population_size_adaptation=self.population_size_adaptation,
+            initial_popsize=self.initial_popsize,
+        )
+
+        res = _nevergrad_internal(
+            problem=problem,
+            x0=x0,
+            configured_optimizer=configured_optimizer,
+            stopping_maxfun=self.stopping_maxfun,
+            n_cores=self.n_cores,
+            seed=self.seed,
+            sigma=self.sigma,
+            nonlinear_constraints=problem.nonlinear_constraints,
+        )
+        return res
+
+
+@mark.minimizer(
+    name="nevergrad_cga",
+    solver_type=AggregationLevel.SCALAR,
+    is_available=IS_NEVERGRAD_INSTALLED,
+    is_global=True,
+    needs_jac=False,
+    needs_hess=False,
+    supports_parallelism=True,
+    supports_bounds=True,
+    supports_linear_constraints=False,
+    supports_nonlinear_constraints=False,
+    disable_history=False,
+)
+@dataclass(frozen=True)
+class NevergradCGA(Algorithm):
+    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL
+    n_cores: PositiveInt = 1
+    seed: int | None = None
+    sigma: float | None = None
+
+    def _solve_internal_problem(
+        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
+    ) -> InternalOptimizeResult:
+        if not IS_NEVERGRAD_INSTALLED:
+            raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR)
+
+        configured_optimizer = ng.optimizers.cGA
+
+        res = _nevergrad_internal(
+            problem=problem,
+            x0=x0,
+            configured_optimizer=configured_optimizer,
+            stopping_maxfun=self.stopping_maxfun,
+            n_cores=self.n_cores,
+            seed=self.seed,
+            sigma=self.sigma,
+            nonlinear_constraints=problem.nonlinear_constraints,
+        )
+        return res
+
+
+@mark.minimizer(
+    name="nevergrad_eda",
+    solver_type=AggregationLevel.SCALAR,
+    is_available=IS_NEVERGRAD_INSTALLED,
+    is_global=True,
+    needs_jac=False,
+    needs_hess=False,
+    supports_parallelism=True,
+    supports_bounds=True,
+    supports_linear_constraints=False,
+    supports_nonlinear_constraints=False,
+    disable_history=False,
+)
+@dataclass(frozen=True)
+class NevergradEDA(Algorithm):
+    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL
+    n_cores: PositiveInt = 1
+    seed: int | None = None
+    sigma: float | None = None
+
+    def _solve_internal_problem(
+        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
+    ) -> InternalOptimizeResult:
+        if not IS_NEVERGRAD_INSTALLED:
+            raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR)
+
+        configured_optimizer = ng.optimizers.EDA
+
+        res = _nevergrad_internal(
+            problem=problem,
+            x0=x0,
+            configured_optimizer=configured_optimizer,
+            stopping_maxfun=self.stopping_maxfun,
+            n_cores=self.n_cores,
+            seed=self.seed,
+            sigma=self.sigma,
+            nonlinear_constraints=problem.nonlinear_constraints,
+        )
+        return res
+
+
+@mark.minimizer(
+    name="nevergrad_tbpsa",
+    solver_type=AggregationLevel.SCALAR,
+    is_available=IS_NEVERGRAD_INSTALLED,
+    is_global=True,
+    needs_jac=False,
+    needs_hess=False,
+    supports_parallelism=True,
+    supports_bounds=True,
+    supports_linear_constraints=False,
+    supports_nonlinear_constraints=False,
+    disable_history=False,
+)
+@dataclass(frozen=True)
+class NevergradTBPSA(Algorithm):
+    noise_handling: bool = True
+    initial_popsize: int | None = None
+    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL
+    n_cores: PositiveInt = 1
+    seed: int | None = None
+    sigma: float | None = None
+
+    def _solve_internal_problem(
+        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
+    ) -> InternalOptimizeResult:
+        if not IS_NEVERGRAD_INSTALLED:
+            raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR)
+
+        configured_optimizer = ng.optimizers.ParametrizedTBPSA(
+            naive=self.noise_handling,
+            initial_popsize=self.initial_popsize,
+        )
+
+        res = _nevergrad_internal(
+            problem=problem,
+            x0=x0,
+            configured_optimizer=configured_optimizer,
+            stopping_maxfun=self.stopping_maxfun,
+            n_cores=self.n_cores,
+            seed=self.seed,
+            sigma=self.sigma,
+            nonlinear_constraints=problem.nonlinear_constraints,
+        )
+        return res
+
+
+@mark.minimizer(
+    name="nevergrad_randomsearch",
+    solver_type=AggregationLevel.SCALAR,
+    is_available=IS_NEVERGRAD_INSTALLED,
+    is_global=True,
+    needs_jac=False,
+    needs_hess=False,
+    supports_parallelism=True,
+    supports_bounds=True,
+    supports_linear_constraints=False,
+    supports_nonlinear_constraints=False,
+    disable_history=False,
+)
+@dataclass(frozen=True)
+class NevergradRandomSearch(Algorithm):
+    middle_point: bool = False
+    opposition_mode: Literal["opposite", "quasi"] | None = None
+    sampler: Literal["parametrization", "gaussian", "cauchy"] = "parametrization"
+    scale: PositiveFloat | Literal["random", "auto", "autotune"] = "auto"
+    recommendation_rule: Literal[
+        "average_of_best", "pessimistic", "average_of_exp_best"
+    ] = "pessimistic"
+    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL
+    n_cores: PositiveInt = 1
+    sigma: float | None = None
+
+    def _solve_internal_problem(
+        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
+    ) -> InternalOptimizeResult:
+        if not IS_NEVERGRAD_INSTALLED:
+            raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR)
+
+        configured_optimizer = ng.optimizers.RandomSearchMaker(
+            stupid=False,
+            middle_point=self.middle_point,
+            opposition_mode=self.opposition_mode,
+            sampler=self.sampler,
+            scale=self.scale,
+            recommendation_rule=self.recommendation_rule,
+        )
+
+        res = _nevergrad_internal(
+            problem=problem,
+            x0=x0,
+            configured_optimizer=configured_optimizer,
+            stopping_maxfun=self.stopping_maxfun,
+            n_cores=self.n_cores,
+            seed=None,
+            sigma=self.sigma,
+            nonlinear_constraints=problem.nonlinear_constraints,
+        )
+        return res
+
+
+@mark.minimizer(
+    name="nevergrad_samplingsearch",
+    solver_type=AggregationLevel.SCALAR,
+    is_available=IS_NEVERGRAD_INSTALLED,
+    is_global=True,
+    needs_jac=False,
+    needs_hess=False,
+    supports_parallelism=True,
+    supports_bounds=True,
+    supports_linear_constraints=False,
+    supports_nonlinear_constraints=False,
+    disable_history=False,
+)
+@dataclass(frozen=True)
+class NevergradSamplingSearch(Algorithm):
+    sampler: Literal["Halton", "LHS", "Hammersley"] = "Halton"
+    scrambled: bool = False
+    middle_point: bool = False
+    cauchy: bool = False
+    scale: bool | NonNegativeFloat = 1.0
+    rescaled: bool = False
+    recommendation_rule: Literal["average_of_best", "pessimistic"] = "pessimistic"
+    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL
+    n_cores: PositiveInt = 1
+    seed: int | None = None
+    sigma: float | None = None
+
+    def _solve_internal_problem(
+        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
+    ) -> InternalOptimizeResult:
+        if not IS_NEVERGRAD_INSTALLED:
+            raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR)
+
+        configured_optimizer = ng.optimizers.SamplingSearch(
+            sampler=self.sampler,
+            scrambled=self.scrambled,
+            middle_point=self.middle_point,
+            cauchy=self.cauchy,
+            scale=self.scale,
+            rescaled=self.rescaled,
+            recommendation_rule=self.recommendation_rule,
+        )
+
+        res = _nevergrad_internal(
+            problem=problem,
+            x0=x0,
+            configured_optimizer=configured_optimizer,
+            stopping_maxfun=self.stopping_maxfun,
+            n_cores=self.n_cores,
+            seed=self.seed,
+            sigma=self.sigma,
+            nonlinear_constraints=problem.nonlinear_constraints,
+        )
+        return res
+
+
+@mark.minimizer(
+    name="nevergrad_NGOpt",
+    solver_type=AggregationLevel.SCALAR,
+    is_available=IS_NEVERGRAD_INSTALLED,
+    is_global=True,
+    needs_jac=False,
+    needs_hess=False,
+    supports_parallelism=True,
+    supports_bounds=True,
+    supports_linear_constraints=False,
+    supports_nonlinear_constraints=False,
+    disable_history=False,
+)
+@dataclass(frozen=True)
+class NevergradNGOpt(Algorithm):
+    optimizer: Literal[
+        "NGOpt",
+        "NGOpt4",
+        "NGOpt8",
+        "NGOpt10",
+        "NGOpt12",
+        "NGOpt13",
+        "NGOpt14",
+        "NGOpt15",
+        "NGOpt16",
+        "NGOpt21",
+        "NGOpt36",
+        "NGOpt38",
+        "NGOpt39",
+        "NGOptRW",
+        "NGOptF",
+        "NGOptF2",
+        "NGOptF3",
+        "NGOptF5",
+        "NgIoh2",
+        "NgIoh3",
+        "NgIoh4",
+        "NgIoh5",
+        "NgIoh6",
+        "NgIoh7",
+        "NgIoh11",
+        "NgIoh14",
+        "NgIoh13",
+        "NgIoh15",
+        "NgIoh12",
+        "NgIoh16",
+        "NgIoh17",
+        "NgIoh21",
+        "NgIoh20",
+        "NgIoh19",
+        "NgIoh18",
+        "NgIoh10",
+        "NgIoh9",
+        "NgIoh8",
+        "NgIoh12b",
+        "NgIoh13b",
+        "NgIoh14b",
+        "NgIoh15b",
+        "NgDS",
+        "NgDS2",
+        "NGDSRW",
+        "NGO",
+        "NgIohRW2",
+        "NgIohTuned",
+        "CSEC",
+        "CSEC10",
+        "CSEC11",
+        "Wiz",
+    ] = "NGOpt"
+    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL
+    n_cores: PositiveInt = 1
+    seed: int | None = None
+    sigma: float | None = None
+
+    def _solve_internal_problem(
+        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
+    ) -> InternalOptimizeResult:
+        if not IS_NEVERGRAD_INSTALLED:
+            raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR)
+
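+        # Look up the selected NGOpt variant by name from ng.optimizers.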
+        configured_optimizer = getattr(ng.optimizers, self.optimizer)
+
+        res = _nevergrad_internal(
+            problem=problem,
+            x0=x0,
+            configured_optimizer=configured_optimizer,
+            stopping_maxfun=self.stopping_maxfun,
+            n_cores=self.n_cores,
+            seed=self.seed,
+            sigma=self.sigma,
+            nonlinear_constraints=problem.nonlinear_constraints,
+        )
+
+        return res
+
+
+@mark.minimizer(
+    name="nevergrad_meta",
+    solver_type=AggregationLevel.SCALAR,
+    is_available=IS_NEVERGRAD_INSTALLED,
+    is_global=True,
+    needs_jac=False,
+    needs_hess=False,
+    supports_parallelism=True,
+    supports_bounds=True,
+    supports_linear_constraints=False,
+    supports_nonlinear_constraints=False,
+    disable_history=False,
+)
+@dataclass(frozen=True)
+class NevergradMeta(Algorithm):
+    optimizer: Literal[
+        "MultiBFGSPlus",
+        "LogMultiBFGSPlus",
+        "SqrtMultiBFGSPlus",
+        "MultiCobylaPlus",
+        "MultiSQPPlus",
+        "BFGSCMAPlus",
+        "LogBFGSCMAPlus",
+        "SqrtBFGSCMAPlus",
+        "SQPCMAPlus",
+        "LogSQPCMAPlus",
+        "SqrtSQPCMAPlus",
+        "MultiBFGS",
+        "LogMultiBFGS",
+        "SqrtMultiBFGS",
+        "MultiCobyla",
+        "ForceMultiCobyla",
+        "MultiSQP",
+        "BFGSCMA",
+        "LogBFGSCMA",
+        "SqrtBFGSCMA",
+        "SQPCMA",
+        "LogSQPCMA",
+        "SqrtSQPCMA",
+        "FSQPCMA",
+        "F2SQPCMA",
+        "F3SQPCMA",
+        "MultiDiscrete",
+        "CMandAS2",
+        "CMandAS3",
+        "MetaCMA",
+        "CMA",
+        "PCEDA",
+        "MPCEDA",
+        "MEDA",
+        "NoisyBandit",
+        "Shiwa",
+        "Carola3",
+    ] = "Shiwa"
+    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL
+    n_cores: PositiveInt = 1
+    seed: int | None = None
+    sigma: float | None = None
+
+    def _solve_internal_problem(
+        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
+    ) -> InternalOptimizeResult:
+        if not IS_NEVERGRAD_INSTALLED:
+            raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR)
+
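+        # Look up the selected meta optimizer by name from ng.optimizers.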
+        configured_optimizer = getattr(ng.optimizers, self.optimizer)
+
+        res = _nevergrad_internal(
+            problem=problem,
+            x0=x0,
+            configured_optimizer=configured_optimizer,
+            stopping_maxfun=self.stopping_maxfun,
+            n_cores=self.n_cores,
+            seed=self.seed,
+            sigma=self.sigma,
+            nonlinear_constraints=problem.nonlinear_constraints,
+        )
+
+        return res
+
+
+def _nevergrad_internal(
+    problem: InternalOptimizationProblem,
+    x0: NDArray[np.float64],
+    n_cores: int,
+    configured_optimizer: "ng.optimization.base.ConfiguredOptimizer",
+    stopping_maxfun: int,
+    seed: int | None,
+    sigma: float | None,
+    nonlinear_constraints: list[dict[str, Any]] | None,
+) -> InternalOptimizeResult:
+    """Internal helper function for nevergrad.
+
+    Runs the optimization loop using nevergrad's ask-and-tell interface.
+
+    Args:
+        problem (InternalOptimizationProblem): Internal optimization problem to solve.
+        x0 (np.ndarray): Initial parameter vector of shape (n_params,).
+        n_cores (int): Number of processes used to parallelize the function
+            evaluations.
+        configured_optimizer (ConfiguredOptimizer): Nevergrad optimizer instance
+            configured with options.
+        stopping_maxfun (int): Maximum number of function evaluations.
+        seed (int | None): Random seed for reproducibility. Defaults to None.
+        sigma (float | None): Mutation standard deviation used when sampling new
+            candidates. Defaults to None, in which case nevergrad's default of 1
+            is used.
+        nonlinear_constraints (list[dict[str, Any]] | None): Nonlinear constraints.
+            Currently not passed on to the optimizer; constraint handling is on
+            hold until it is improved.
+
+    Returns:
+        InternalOptimizeResult: Internal optimization result.
+
+    """
+
+    param = ng.p.Array(
+        init=x0,
+    )
+
+    param.set_bounds(
+        lower=problem.bounds.lower,
+        upper=problem.bounds.upper,
+    )
+
+    # If bounds are not provided, the initial population is sampled from a
+    # Gaussian with mean 0 and sigma 1; the sigma can be set through this method.
+    param.set_mutation(sigma=sigma)
+
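+    # Wrap the array in an Instrumentation so that it is passed as the first
+    # positional argument; candidates are therefore unpacked via x.value[0][0]
+    # in the loop below.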
+    instrum = ng.p.Instrumentation(param)
+
+    if seed is not None:
+        instrum.random_state.seed(seed)
+
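+    # budget caps the total number of function evaluations; num_workers is the
+    # number of candidates that are asked for and evaluated in parallel.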
+    optimizer = configured_optimizer(
+        parametrization=instrum, budget=stopping_maxfun, num_workers=n_cores
+    )
+
+    # if nonlinear_constraints:
+    #     constraints = _process_nonlinear_constraints(nonlinear_constraints)
+
+    # Optimization loop using the ask-and-tell interface: ask for up to n_cores
+    # candidates, evaluate them in parallel via batch_fun, and tell the losses back.
+    while optimizer.num_ask < stopping_maxfun:
+        x_list = [
+            optimizer.ask()
+            for _ in range(min(n_cores, stopping_maxfun - optimizer.num_ask))
+        ]
+
+        losses = problem.batch_fun([x.value[0][0] for x in x_list], n_cores=n_cores)
+
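+        # Constraint handling is currently disabled (see the commented-out code
+        # below), so the losses are told back to the optimizer directly.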
+        if not nonlinear_constraints:
             for x, loss in zip(x_list, losses, strict=True):
                 optimizer.tell(x, loss)
+        # else:
+        # constraint_violations = _batch_constraint_evaluations(
+        #     constraints, [x.value[0][0] for x in x_list], n_cores
+        # )
+        # for x, loss, cv in zip(x_list, losses, constraint_violations, strict=True):
+        #     optimizer.tell(x, loss, cv)
 
-        recommendation = optimizer.provide_recommendation()
+    recommendation = optimizer.provide_recommendation()
+    best_x = recommendation.value[0][0]
+    loss = recommendation.loss
+
+    # Some optimizers (e.g. CMA) do not attach a loss to the recommendation;
+    # in that case, evaluate it manually using problem.fun.
+    if loss is None:
+        loss = problem.fun(best_x)
+
+    result = InternalOptimizeResult(
+        x=best_x,
+        fun=loss,
+        success=True,
+        n_fun_evals=optimizer.num_ask,
+        n_jac_evals=0,
+        n_hess_evals=0,
+    )
+
+    return result
+
+
+### Handling of nonlinear constraints is skipped until constraint handling is improved.
+
+# def _process_nonlinear_constraints(
+#     constraints: list[dict[str, Any]],
+# ) -> list[dict[str, Any]]:
+#     """Process stacked inequality constraints as single constraints.
+
+#     Returns a list of single constraints.
+
+#     """
+#     processed_constraints = []
+#     for c in constraints:
+#         new = _vector_to_list_of_scalar(c)
+#         processed_constraints.extend(new)
+#     return processed_constraints
+
+
+# def _get_constraint_evaluations(
+#     constraints: list[dict[str, Any]], x: NDArray[np.float64]
+# ) -> list[NDArray[np.float64]]:
+#     """In optimagic, inequality constraints are internally defined as g(x) >= 0.
+#     Nevergrad uses h(x) <= 0, hence a sign flip is required. Passed equality
+#     constraints are treated as inequality constraints with lower bound equal to
+#     the value. Return a list of constraint evaluations at x.
+#
+#     """
+#     results = [-c["fun"](x) for c in constraints]
+#     results = [np.atleast_1d(i) for i in results]
+#     return results
 
-        result = InternalOptimizeResult(
-            x=recommendation.value[0][0],
-            fun=recommendation.loss,
-            success=True,
-            n_fun_evals=optimizer.num_ask,
-            n_jac_evals=0,
-            n_hess_evals=0,
-        )
 
-        return result
+# def _batch_constraint_evaluations(
+#     constraints: list[dict[str, Any]], x_list: list[Any], n_cores: int
+# ) -> list[list[NDArray[np.float64]]]:
+#     """Batch version of _get_constraint_evaluations."""
+#     batch = process_batch_evaluator("joblib")
+#     func = partial(_get_constraint_evaluations, constraints)
+#     results = batch(func=func, arguments=[x for x in x_list], n_cores=n_cores)
+#     return results
diff --git a/tests/optimagic/optimization/test_history_collection.py b/tests/optimagic/optimization/test_history_collection.py
index 743b8cf43..db27b6c20 100644
--- a/tests/optimagic/optimization/test_history_collection.py
+++ b/tests/optimagic/optimization/test_history_collection.py
@@ -38,7 +38,7 @@ def test_history_collection_with_parallelization(algorithm, tmp_path):
         params=np.arange(5),
         algorithm=algorithm,
         bounds=Bounds(lower=lb, upper=ub),
-        algo_options={"n_cores": 2, "stopping_maxiter": 3},
+        algo_options={"n_cores": 2, "stopping_maxiter": 3, "stopping_maxfun": 6},
         logging=SQLiteLogOptions(path=path, if_database_exists="replace"),
     ).history
 
diff --git a/tests/optimagic/optimizers/test_nevergrad.py b/tests/optimagic/optimizers/test_nevergrad.py
new file mode 100644
index 000000000..1e0582106
--- /dev/null
+++ b/tests/optimagic/optimizers/test_nevergrad.py
@@ -0,0 +1,132 @@
+"""Test helper functions for nevergrad optimizers."""
+
+from typing import get_args
+
+import numpy as np
+import pytest
+from numpy.testing import assert_array_almost_equal as aaae
+
+from optimagic import algorithms, mark
+from optimagic.config import IS_NEVERGRAD_INSTALLED
+from optimagic.optimization.optimize import minimize
+from optimagic.parameters.bounds import Bounds
+
+if IS_NEVERGRAD_INSTALLED:
+    import nevergrad as ng
+
+
+@mark.least_squares
+def sos(x):
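+    # Least-squares residuals; optimagic forms the criterion as the sum of
+    # squared residuals.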
+    return x
+
+
+### Nonlinear constraint tests are on hold until constraint handling is improved.
+# def dummy_func():
+#     return lambda x: x
+
+
+# vec_constr = [
+#     {
+#         "type": "ineq",
+#         "fun": lambda x: [np.prod(x) + 1.0, 2.0 - np.prod(x)],
+#         "jac": dummy_func,
+#         "n_constr": 2,
+#     }
+# ]
+
+# constrs = [
+#     {
+#         "type": "ineq",
+#         "fun": lambda x: np.prod(x) + 1.0,
+#         "jac": dummy_func,
+#         "n_constr": 1,
+#     },
+#     {
+#         "type": "ineq",
+#         "fun": lambda x: 2.0 - np.prod(x),
+#         "jac": dummy_func,
+#         "n_constr": 1,
+#     },
+# ]
+
+
+# def test_process_nonlinear_constraints():
+#     got = _process_nonlinear_constraints(vec_constr)
+#     assert len(got) == 2
+
+
+# def test_get_constraint_evaluations():
+#     x = np.array([1, 1])
+#     got = _get_constraint_evaluations(constrs, x)
+#     expected = [np.array([-2.0]), np.array([-1.0])]
+#     assert got == expected
+
+
+# def test_batch_constraint_evaluations():
+#     x = np.array([1, 1])
+#     x_list = [x] * 2
+#     got = _batch_constraint_evaluations(constrs, x_list, 2)
+#     expected = [[np.array([-2.0]), np.array([-1.0])]] * 2
+#     assert got == expected
+
+
+# Test that every optimizer listed in the Literal type hints is a valid attribute
+# of ng.optimizers.
+@pytest.mark.skipif(not IS_NEVERGRAD_INSTALLED, reason="nevergrad not installed")
+def test_meta_optimizers_are_valid():
+    opt = algorithms.NevergradMeta
+    optimizers = get_args(opt.__annotations__["optimizer"])
+    for optimizer in optimizers:
+        try:
+            getattr(ng.optimizers, optimizer)
+        except AttributeError:
+            pytest.fail(f"Optimizer '{optimizer}' not found in Nevergrad")
+
+
+@pytest.mark.skipif(not IS_NEVERGRAD_INSTALLED, reason="nevergrad not installed")
+def test_ngopt_optimizers_are_valid():
+    opt = algorithms.NevergradNGOpt
+    optimizers = get_args(opt.__annotations__["optimizer"])
+    for optimizer in optimizers:
+        try:
+            getattr(ng.optimizers, optimizer)
+        except AttributeError:
+            pytest.fail(f"Optimizer '{optimizer}' not found in Nevergrad")
+
+
+# list of available optimizers in nevergrad_meta
+NEVERGRAD_META = get_args(algorithms.NevergradMeta.__annotations__["optimizer"])
+# list of available optimizers in nevergrad_ngopt
+NEVERGRAD_NGOPT = get_args(algorithms.NevergradNGOpt.__annotations__["optimizer"])
+
+
+# Run each optimizer on a simple bounded sum-of-squares problem.
+@pytest.mark.slow
+@pytest.mark.parametrize("algorithm", NEVERGRAD_META)
+def test_meta_optimizers_with_stochastic_global_algorithm_on_sum_of_squares(algorithm):
+    res = minimize(
+        fun=sos,
+        params=np.array([0.35, 0.35]),
+        bounds=Bounds(lower=np.array([0.2, -0.5]), upper=np.array([1, 0.5])),
+        algorithm=algorithms.NevergradMeta(algorithm),
+        collect_history=False,
+        skip_checks=True,
+        algo_options={"seed": 12345},
+    )
+    assert res.success in [True, None]
+    aaae(res.params, np.array([0.2, 0]), decimal=1)
+
+
+@pytest.mark.slow
+@pytest.mark.parametrize("algorithm", NEVERGRAD_NGOPT)
+def test_ngopt_optimizers_with_stochastic_global_algorithm_on_sum_of_squares(algorithm):
+    res = minimize(
+        fun=sos,
+        params=np.array([0.35, 0.35]),
+        bounds=Bounds(lower=np.array([0.2, -0.5]), upper=np.array([1, 0.5])),
+        algorithm=algorithms.NevergradNGOpt(algorithm),
+        collect_history=False,
+        skip_checks=True,
+        algo_options={"seed": 12345},
+    )
+    assert res.success in [True, None]
+    aaae(res.params, np.array([0.2, 0]), decimal=1)