109 changes: 15 additions & 94 deletions c3/libraries/algorithms.py
@@ -3,13 +3,14 @@
optional arguments.
"""

import ast
from scipy.optimize import minimize as minimize
import cma.evolution_strategy as cma
import numpy as np
import warnings
from typing import Callable
import adaptive
import copy
import warnings
from scipy.optimize import OptimizeResult
import tensorflow as tf

@@ -74,37 +75,16 @@ def grid2D(x_init, fun=None, fun_grad=None, grad_lookup=None, options={}):
else:
points = 100

# probe_list = []
# if 'probe_list' in options:
# for x in options['probe_list']:
# probe_list.append(eval(x))

# if 'init_point' in options:
# init_point = bool(options.pop('init_point'))
# if init_point:
# probe_list.append(x_init)

bounds = options["bounds"][0]
bound_min = bounds[0]
bound_max = bounds[1]
# probe_list_min = np.min(np.array(probe_list)[:,0])
# probe_list_max = np.max(np.array(probe_list)[:,0])
# bound_min = min(bound_min, probe_list_min)
# bound_max = max(bound_max, probe_list_max)
xs = np.linspace(bound_min, bound_max, points)

bounds = options["bounds"][1]
bound_min = bounds[0]
bound_max = bounds[1]
# probe_list_min = np.min(np.array(probe_list)[:,1])
# probe_list_max = np.max(np.array(probe_list)[:,1])
# bound_min = min(bound_min, probe_list_min)
# bound_max = max(bound_max, probe_list_max)
ys = np.linspace(bound_min, bound_max, points)

# for p in probe_list:
# fun(p)

for x in xs:
for y in ys:
if "wrapper" in options:
@@ -205,6 +185,9 @@ def adaptive_scan(x_init, fun=None, fun_grad=None, grad_lookup=None, options={})
init_point : boolean
Include the initial point in the sampling
"""
warnings.warn(
"The Adaptive Scan algorithm is not thoroughly tested and might contain bugs"
)
if "accuracy_goal" in options:
accuracy_goal = options["accuracy_goal"]
else:
@@ -214,7 +197,7 @@
probe_list = []
if "probe_list" in options:
for x in options["probe_list"]:
probe_list.append(eval(x))
probe_list.append(ast.literal_eval(x))

if "init_point" in options:
init_point = bool(options.pop("init_point"))
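The eval → ast.literal_eval change above is a safety fix: literal_eval only parses Python literals (numbers, strings, lists, tuples, dicts, booleans, None) and rejects arbitrary expressions. A minimal illustration, not part of the diff:

import ast

ast.literal_eval("[0.1, 0.2]")        # -> [0.1, 0.2], parsed safely
ast.literal_eval("__import__('os')")  # raises ValueError instead of executing code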
@@ -339,22 +322,7 @@ def tf_adam(
SciPy OptimizeResult type object with final parameters
"""
# TODO Update maxfun->maxiters, default hyperparameters and error handling
warnings.warn("The integration of this algorithm is incomplete and incorrect.")

iters = options["maxfun"]
var = tf.Variable(x_init)

def tf_fun():
return fun(var)

opt_adam = tf.keras.optimizers.Adam(learning_rate=0.001, epsilon=0.1)

for step in range(iters):
step_count = opt_adam.minimize(tf_fun, [var])
print(f"epoch {step_count.numpy()}: func_value: {tf_fun()}")

result = OptimizeResult(x=var.numpy(), success=True)
return result
raise NotImplementedError("This algorithm is not yet implemented.")
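For context, the removed body wrapped tf.keras.optimizers.Adam around the goal function and returned a SciPy OptimizeResult. A corrected integration would likely look roughly like the sketch below; this is an illustration only, not the project's final API, and the same shape would apply to the RMSProp and Adadelta wrappers removed further down. It assumes fun is differentiable by TensorFlow:

def tf_adam_sketch(fun, x_init, maxiters=100, learning_rate=0.01):
    var = tf.Variable(x_init)
    opt = tf.keras.optimizers.Adam(learning_rate=learning_rate)
    for _ in range(maxiters):
        with tf.GradientTape() as tape:
            loss = fun(var)
        # gradient of the goal function with respect to the parameters
        grads = tape.gradient(loss, [var])
        opt.apply_gradients(zip(grads, [var]))
    return OptimizeResult(x=var.numpy(), fun=float(loss), success=True)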


def tf_rmsprop(
@@ -386,25 +354,7 @@ def tf_rmsprop(
SciPy OptimizeResult type object with final parameters
"""
# TODO Update maxfun->maxiters, default hyperparameters and error handling
warnings.warn("The integration of this algorithm is incomplete and incorrect.")

iters = options["maxfun"]

var = tf.Variable(x_init)

def tf_fun():
return fun(var)

opt_rmsprop = tf.keras.optimizers.RMSprop(
learning_rate=0.1, epsilon=1e-2, centered=True
)

for step in range(iters):
step_count = opt_rmsprop.minimize(tf_fun, [var])
print(f"epoch {step_count.numpy()}: func_value: {tf_fun()}")

result = OptimizeResult(x=var.numpy(), success=True)
return result
raise NotImplementedError("This algorithm is not yet implemented.")


@algo_reg_deco
@@ -437,25 +387,7 @@ def tf_adadelta(
SciPy OptimizeResult type object with final parameters
"""
# TODO Update maxfun->maxiters, default hyperparameters and error handling
warnings.warn("The integration of this algorithm is incomplete and incorrect.")

iters = options["maxfun"]

var = tf.Variable(x_init)

def tf_fun():
return fun(var)

opt_adadelta = tf.keras.optimizers.Adadelta(
learning_rate=0.1, rho=0.95, epsilon=1e-2
)

for step in range(iters):
step_count = opt_adadelta.minimize(tf_fun, [var])
print(f"epoch {step_count.numpy()}: func_value: {tf_fun()}")

result = OptimizeResult(x=var.numpy(), success=True)
return result
raise NotImplementedError("This algorithm is not yet implemented.")


@algo_reg_deco
@@ -664,6 +596,9 @@ def gcmaes(x_init, fun=None, fun_grad=None, grad_lookup=None, options={}):
EXPERIMENTAL CMA-ES where every point in the cloud is optimized with L-BFGS and the
resulting cloud and function values are used for the CMA update.
"""
warnings.warn(
"The GCMA-ES algorithm is not thoroughly tested and might contain bugs"
)
options_cma = options["cmaes"]

if "init_point" in options_cma:
@@ -732,20 +667,6 @@ def gcmaes(x_init, fun=None, fun_grad=None, grad_lookup=None, options={}):
return es.result.xbest
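A minimal sketch of the hybrid loop the docstring describes, using the ask/tell interface of the cma package and SciPy's L-BFGS-B; parameter names and the stopping rule here are assumptions for illustration, not the code in this PR:

def gcmaes_sketch(fun, fun_grad, x_init, sigma0=0.5, polish_iters=5):
    es = cma.CMAEvolutionStrategy(x_init, sigma0)
    while not es.stop():
        cloud = es.ask()  # sample a population from the current distribution
        polished, values = [], []
        for x in cloud:
            # polish each sample with a few L-BFGS-B iterations
            res = minimize(fun, x, jac=fun_grad, method="L-BFGS-B",
                           options={"maxiter": polish_iters})
            polished.append(res.x)
            values.append(res.fun)
        es.tell(polished, values)  # CMA update on the polished cloud
    return es.result.xbest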


# def oneplusone(x_init, goal_fun):
# optimizer = algo_registry['OnePlusOne'](instrumentation=x_init.shape[0])
# while True:
# # TODO make this logging happen elsewhere
# # self.logfile.write(f"Batch {self.evaluation}\n")
# # self.logfile.flush()
# tmp = optimizer.ask()
# samples = tmp.args
# solutions = []
# for sample in samples:
# goal = goal_fun(sample)
# solutions.append(goal)
# optimizer.tell(tmp, solutions)
#
# # TODO deal with storing best value elsewhere
# # recommendation = optimizer.provide_recommendation()
# # return recommendation.args[0]
@algo_reg_deco
def oneplusone(x_init, goal_fun):
raise NotImplementedError("This algorithm is not yet implemented.")
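The commented-out draft above followed an ask-and-tell pattern; the registry name suggests nevergrad's OnePlusOne optimizer was the target (an assumption — the draft's instrumentation keyword belongs to an older nevergrad API, now parametrization). A minimal sketch of that pattern:

import nevergrad as ng

def oneplusone_sketch(goal_fun, dim, budget=100):
    optimizer = ng.optimizers.OnePlusOne(parametrization=dim, budget=budget)
    for _ in range(budget):
        candidate = optimizer.ask()       # propose a new sample
        loss = goal_fun(candidate.value)  # evaluate the goal function
        optimizer.tell(candidate, loss)   # report the result back
    return optimizer.provide_recommendation().value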
48 changes: 48 additions & 0 deletions test/test_scan_algos.py
@@ -0,0 +1,48 @@
"""Test module for algorithms that scan a function across a given range.
We use a mock function that appends each parameter passed to it to a global
list. We then cross-check this list against the expected values.
"""

from typing import List
import numpy as np
from c3.libraries.algorithms import sweep, grid2D

params = list()
params2D = list()

# constants for sweep and grid2D parameters
X_INIT = [5.0]
POINTS = 5
BOUNDS = [[0.0, 2.5]]
BOUNDS_2D = [[0.0, 2.5], [0.0, 2.5]]
DESIRED_PARAMS = np.linspace(BOUNDS[0][0], BOUNDS[0][1], POINTS, dtype=float)
DESIRED_PARAMS_2D = [
[x, y]
for x in np.linspace(BOUNDS_2D[0][0], BOUNDS_2D[0][1], POINTS)
for y in np.linspace(BOUNDS_2D[1][0], BOUNDS_2D[1][1], POINTS)
]
INIT_POINT = False


def mock_fun(x: List[float]) -> None:
params.append(x[0])


def mock_fun2D(inputs: List[float]) -> None:
params2D.append(inputs)


def test_sweep() -> None:
"""Test c3.libraries.algorithms.sweep()"""
sweep(
X_INIT,
fun=mock_fun,
options={"points": POINTS, "bounds": BOUNDS, "init_point": INIT_POINT},
)
np.testing.assert_allclose(params, DESIRED_PARAMS)


def test_grid2D() -> None:
"""Test c3.libraries.algorithms.grid2D()"""
grid2D(X_INIT, fun=mock_fun2D, options={"points": POINTS, "bounds": BOUNDS_2D})
np.testing.assert_allclose(params2D, DESIRED_PARAMS_2D)
19 changes: 19 additions & 0 deletions test/test_two_qubits.py
@@ -398,3 +398,22 @@ def test_optim_lbfgs() -> None:

lbfgs_opt.optimize_controls()
assert lbfgs_opt.current_best_goal < 0.01


@pytest.mark.optimizers
@pytest.mark.slow
@pytest.mark.integration
def test_optim_lbfgs_grad_free() -> None:
lbfgs_grad_free_opt = C1(
dir_path=logdir,
fid_func=fidelities.average_infid_set,
fid_subspace=["Q1", "Q2"],
pmap=pmap,
algorithm=algorithms.lbfgs_grad_free,
options={"maxfun": 5},
run_name="grad_free_lbfgs",
)
lbfgs_grad_free_opt.set_exp(exp)

lbfgs_grad_free_opt.optimize_controls()
assert lbfgs_grad_free_opt.current_best_goal < 0.01