Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Resurrect Model learning #117

Merged
merged 25 commits into from
Jun 30, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
25 commits
Select commit Hold shift + click to select a range
237a4c2
remove unused dataset file
lazyoracle May 19, 2021
49cfa31
add alias for opt.run()
lazyoracle Jun 24, 2021
f12d570
update key name for std
lazyoracle Jun 24, 2021
ae31484
fix best point read at par with c1 and c2
lazyoracle Jun 25, 2021
825ec15
update cfg to use new small dataset
lazyoracle Jun 29, 2021
c79e9b7
use system model based on example blackbox
lazyoracle Jun 29, 2021
eb4c4ea
rectify state labels for single qubit device
lazyoracle Jun 29, 2021
58e1bc9
set lbfgs maxfun to 50
lazyoracle Jun 29, 2021
cdcdff8
only 2 orbit data points are sufficient
lazyoracle Jun 29, 2021
e296244
exp hjson based on blackbox in Simulated Calibration
lazyoracle Jun 29, 2021
393f029
dataset based on few runs in Simulated Calibration
lazyoracle Jun 29, 2021
4fad92e
use correct dataset, exp hjson and state labels
lazyoracle Jun 29, 2021
f746081
add markers, add test for creating C3 style optim object
lazyoracle Jun 29, 2021
4f5782f
test model learning on a small dataset
lazyoracle Jun 29, 2021
ce9e3b7
remove commented code
lazyoracle Jun 29, 2021
47002de
outline for model learning example
lazyoracle Jun 29, 2021
fe8644d
understanding the dataset
lazyoracle Jun 29, 2021
4a3b7be
stop logging full exp.pmap for every evaluation
lazyoracle Jun 30, 2021
70e66b9
code to run model learning
lazyoracle Jun 30, 2021
dff091f
process, analyse, visualise model learning logs
lazyoracle Jun 30, 2021
636eb22
Update kernel spec to remove local sys info
lazyoracle Jun 30, 2021
341c13b
notebook to rst for docs
lazyoracle Jun 30, 2021
143c5f3
images for model learning docs
lazyoracle Jun 30, 2021
9a0ac03
update title C^2 -> C_2
lazyoracle Jun 30, 2021
8dfd606
incl model learning docs
lazyoracle Jun 30, 2021
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 7 additions & 10 deletions c3/optimizers/c3.py
Original file line number Diff line number Diff line change
Expand Up @@ -115,6 +115,7 @@ def __init__(
self.fom = g_LL_prime_combined
self.__dir_path = dir_path
self.__run_name = run_name
self.run = self.learn_model # Alias legacy name for optimization method

def log_setup(self) -> None:
"""
Expand All @@ -135,7 +136,6 @@ def log_setup(self) -> None:
)
self.logdir = log_setup(self.__dir_path, run_name)
self.logname = "model_learn.log"
# shutil.copy2(self.__real_model_folder, self.logdir)

def read_data(self, datafiles: Dict[str, str]) -> None:
"""
Expand Down Expand Up @@ -195,8 +195,8 @@ def learn_model(self) -> None:
)
except KeyboardInterrupt:
pass
with open(self.logdir + "best_point_" + self.logname, "r") as file:
best_params = hjson.loads(file.readlines()[1])["params"]
with open(os.path.join(self.logdir, "best_point_" + self.logname), "r") as file:
best_params = hjson.load(file)["optim_status"]["params"]
self.pmap.set_parameters(best_params)
self.pmap.model.update_model()
self.end_log()
Expand Down Expand Up @@ -244,14 +244,11 @@ def _log_one_dataset(
) -> None:
seqs_pp = self.seqs_per_point
m_vals = data_set["results"][:seqs_pp]
m_stds = np.array(data_set["result_stds"][:seqs_pp])
m_stds = np.array(data_set["results_std"][:seqs_pp])
m_shots = data_set["shots"][:seqs_pp]
sequences = data_set["seqs"][:seqs_pp]
with open(self.logdir + self.logname, "a") as logfile:
logfile.write(
f"\n Parameterset {ipar + 1}, #{count} of {len(indeces)}:\n"
f"{str(self.exp.pmap)}\n"
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Removing str(self.exp.pmap) is correct, but do we instead want to print the current parameters in human-readable form here? exp.str_parameters() should do the trick.

)
logfile.write(f"\n Parameterset {ipar + 1}, #{count} of {len(indeces)}:\n")
logfile.write(
"Sequence Simulation Experiment Std Shots" " Diff\n"
)
Expand Down Expand Up @@ -309,7 +306,7 @@ def goal_run(self, current_params: tf.constant) -> tf.float64:
count += 1
data_set = self.learn_from[ipar]
m_vals = data_set["results"][:seqs_pp]
m_stds = data_set["result_stds"][:seqs_pp]
m_stds = data_set["results_std"][:seqs_pp]
m_shots = data_set["shots"][:seqs_pp]
sequences = data_set["seqs"][:seqs_pp]
num_seqs = len(sequences)
Expand Down Expand Up @@ -388,7 +385,7 @@ def goal_run_with_grad(self, current_params):

seqs_pp = self.seqs_per_point
m_vals = data_set["results"][:seqs_pp]
m_stds = np.array(data_set["result_stds"][:seqs_pp])
m_stds = np.array(data_set["results_std"][:seqs_pp])
m_shots = data_set["shots"][:seqs_pp]
sequences = data_set["seqs"][:seqs_pp]
num_seqs = len(sequences)
Expand Down
Loading